author     Stephen Hines <srhines@google.com>   2014-07-21 00:45:20 -0700
committer  Stephen Hines <srhines@google.com>   2014-07-21 00:45:20 -0700
commit     c6a4f5e819217e1e12c458aed8e7b122e23a3a58 (patch)
tree       81b7dd2bb4370a392f31d332a566c903b5744764 /test/CodeGen
parent     19c6fbb3e8aaf74093afa08013134b61fa08f245 (diff)
Update LLVM for rebase to r212749.
Includes a cherry-pick of: r212948 - fixes a small issue with atomic calls

Change-Id: Ib97bd980b59f18142a69506400911a6009d9df18
Diffstat (limited to 'test/CodeGen')
-rw-r--r--test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll55
-rw-r--r--test/CodeGen/AArch64/aarch64-address-type-promotion.ll28
-rw-r--r--test/CodeGen/AArch64/addsub_ext.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll6
-rw-r--r--test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll (renamed from test/CodeGen/AArch64/arm64-2014-04-16-AnInfiniteLoopInDAGCombine.ll)0
-rw-r--r--test/CodeGen/AArch64/arm64-EXT-undef-mask.ll (renamed from test/CodeGen/AArch64/arm64-2014-04-29-EXT-undef-mask.ll)0
-rw-r--r--test/CodeGen/AArch64/arm64-aapcs.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-abi.ll27
-rw-r--r--test/CodeGen/AArch64/arm64-ands-bad-peephole.ll10
-rw-r--r--test/CodeGen/AArch64/arm64-arith.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-atomic-128.ll9
-rw-r--r--test/CodeGen/AArch64/arm64-atomic.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-build-vector.ll24
-rw-r--r--test/CodeGen/AArch64/arm64-convert-v2f64-v2i32.ll24
-rw-r--r--test/CodeGen/AArch64/arm64-convert-v2i32-v2f64.ll29
-rw-r--r--test/CodeGen/AArch64/arm64-convert-v4f64.ll33
-rw-r--r--test/CodeGen/AArch64/arm64-cse.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll3
-rw-r--r--test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll16
-rw-r--r--test/CodeGen/AArch64/arm64-early-ifcvt.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll13
-rw-r--r--test/CodeGen/AArch64/arm64-fp128.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-frame-index.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-misched-basic-A53.ll79
-rw-r--r--test/CodeGen/AArch64/arm64-misched-basic-A57.ll112
-rw-r--r--test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll7
-rw-r--r--test/CodeGen/AArch64/arm64-neon-copy.ll16
-rw-r--r--test/CodeGen/AArch64/arm64-neon-select_cc.ll4
-rw-r--r--test/CodeGen/AArch64/arm64-shrink-v1i64.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll (renamed from test/CodeGen/AArch64/arm64-2014-04-28-sqshl-uqshl-i64Contant.ll)0
-rw-r--r--test/CodeGen/AArch64/arm64-vcvt.ll12
-rw-r--r--test/CodeGen/AArch64/arm64-vshift.ll9
-rw-r--r--test/CodeGen/AArch64/arm64-xaluo.ll2
-rw-r--r--test/CodeGen/AArch64/atomic-ops.ll25
-rw-r--r--test/CodeGen/AArch64/blockaddress.ll4
-rw-r--r--test/CodeGen/AArch64/branch-relax-asm.ll35
-rw-r--r--test/CodeGen/AArch64/breg.ll2
-rw-r--r--test/CodeGen/AArch64/cmpxchg-idioms.ll93
-rw-r--r--test/CodeGen/AArch64/compiler-ident.ll12
-rw-r--r--test/CodeGen/AArch64/complex-fp-to-int.ll141
-rw-r--r--test/CodeGen/AArch64/complex-int-to-fp.ll164
-rw-r--r--test/CodeGen/AArch64/directcond.ll4
-rw-r--r--test/CodeGen/AArch64/f16-convert.ll254
-rw-r--r--test/CodeGen/AArch64/fast-isel-mul.ll40
-rw-r--r--test/CodeGen/AArch64/flags-multiuse.ll2
-rw-r--r--test/CodeGen/AArch64/funcptr_cast.ll13
-rw-r--r--test/CodeGen/AArch64/global-merge-1.ll26
-rw-r--r--test/CodeGen/AArch64/global-merge-2.ll51
-rw-r--r--test/CodeGen/AArch64/global-merge-3.ll51
-rw-r--r--test/CodeGen/AArch64/global-merge-4.ll73
-rw-r--r--test/CodeGen/AArch64/global-merge.ll30
-rw-r--r--test/CodeGen/AArch64/i128-fast-isel-fallback.ll18
-rw-r--r--test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll26
-rw-r--r--test/CodeGen/AArch64/jump-table.ll6
-rw-r--r--test/CodeGen/AArch64/ldst-opt.ll468
-rw-r--r--test/CodeGen/AArch64/lit.local.cfg3
-rw-r--r--test/CodeGen/AArch64/memcpy-f128.ll19
-rw-r--r--test/CodeGen/AArch64/mul_pow2.ll123
-rw-r--r--test/CodeGen/AArch64/regress-tail-livereg.ll14
-rw-r--r--test/CodeGen/AArch64/trunc-v1i64.ll63
-rw-r--r--test/CodeGen/AArch64/tst-br.ll2
-rw-r--r--test/CodeGen/ARM/2009-11-02-NegativeLane.ll2
-rw-r--r--test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll2
-rw-r--r--test/CodeGen/ARM/2010-05-18-PostIndexBug.ll18
-rw-r--r--test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll4
-rw-r--r--test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll20
-rw-r--r--test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll6
-rw-r--r--test/CodeGen/ARM/2012-11-14-subs_carry.ll2
-rw-r--r--test/CodeGen/ARM/2013-05-05-IfConvertBug.ll12
-rw-r--r--test/CodeGen/ARM/2013-07-29-vector-or-combine.ll2
-rw-r--r--test/CodeGen/ARM/Windows/dllimport.ll61
-rw-r--r--test/CodeGen/ARM/Windows/global-minsize.ll16
-rw-r--r--test/CodeGen/ARM/Windows/long-calls.ll18
-rw-r--r--test/CodeGen/ARM/Windows/structors.ll12
-rw-r--r--test/CodeGen/ARM/Windows/vla.ll31
-rw-r--r--test/CodeGen/ARM/aliases.ll2
-rw-r--r--test/CodeGen/ARM/arm-and-tst-peephole.ll8
-rw-r--r--test/CodeGen/ARM/atomic-64bit.ll14
-rw-r--r--test/CodeGen/ARM/atomic-cmp.ll3
-rw-r--r--test/CodeGen/ARM/atomic-load-store.ll37
-rw-r--r--test/CodeGen/ARM/atomic-op.ll6
-rw-r--r--test/CodeGen/ARM/atomic-ops-v8.ll12
-rw-r--r--test/CodeGen/ARM/big-endian-neon-extend.ll81
-rw-r--r--test/CodeGen/ARM/big-endian-neon-trunc-store.ll26
-rw-r--r--test/CodeGen/ARM/big-endian-ret-f64.ll12
-rw-r--r--test/CodeGen/ARM/call-tc.ll6
-rw-r--r--test/CodeGen/ARM/cmpxchg-idioms.ll107
-rw-r--r--test/CodeGen/ARM/cmpxchg-weak.ll43
-rw-r--r--test/CodeGen/ARM/data-in-code-annotations.ll2
-rw-r--r--test/CodeGen/ARM/debug-info-arg.ll2
-rw-r--r--test/CodeGen/ARM/debug-info-blocks.ll9
-rw-r--r--test/CodeGen/ARM/fold-stack-adjust.ll16
-rw-r--r--test/CodeGen/ARM/fptoint.ll2
-rw-r--r--test/CodeGen/ARM/global-merge-1.ll85
-rw-r--r--test/CodeGen/ARM/ifcvt-branch-weight.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt10.ll2
-rw-r--r--test/CodeGen/ARM/indirectbr-3.ll2
-rw-r--r--test/CodeGen/ARM/interrupt-attr.ll8
-rw-r--r--test/CodeGen/ARM/intrinsics-memory-barrier.ll55
-rw-r--r--test/CodeGen/ARM/jump_tables.ll32
-rw-r--r--test/CodeGen/ARM/ldstrex-m.ll59
-rw-r--r--test/CodeGen/ARM/lit.local.cfg3
-rw-r--r--test/CodeGen/ARM/lsr-unfolded-offset.ll2
-rw-r--r--test/CodeGen/ARM/metadata-default.ll16
-rw-r--r--test/CodeGen/ARM/metadata-short-enums.ll16
-rw-r--r--test/CodeGen/ARM/metadata-short-wchar.ll16
-rw-r--r--test/CodeGen/ARM/misched-copy-arm.ll2
-rw-r--r--test/CodeGen/ARM/none-macho.ll10
-rw-r--r--test/CodeGen/ARM/null-streamer.ll7
-rw-r--r--test/CodeGen/ARM/reg_sequence.ll4
-rw-r--r--test/CodeGen/ARM/spill-q.ll2
-rw-r--r--test/CodeGen/ARM/struct-byval-frame-index.ll2
-rw-r--r--test/CodeGen/ARM/twoaddrinstr.ll2
-rw-r--r--test/CodeGen/ARM/va_arg.ll4
-rw-r--r--test/CodeGen/ARM/vldm-sched-a9.ll2
-rw-r--r--test/CodeGen/ARM/widen-vmovs.ll6
-rw-r--r--test/CodeGen/CPP/atomic.ll89
-rw-r--r--test/CodeGen/CPP/lit.local.cfg3
-rw-r--r--test/CodeGen/Generic/MachineBranchProb.ll3
-rw-r--r--test/CodeGen/Generic/select.ll1
-rw-r--r--test/CodeGen/Generic/stop-after.ll2
-rw-r--r--test/CodeGen/Hexagon/lit.local.cfg3
-rw-r--r--test/CodeGen/MSP430/lit.local.cfg3
-rw-r--r--test/CodeGen/Mips/2008-08-01-AsmInline.ll2
-rw-r--r--test/CodeGen/Mips/2013-11-18-fp64-const0.ll2
-rw-r--r--test/CodeGen/Mips/Fast-ISel/loadstore2.ll83
-rw-r--r--test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll38
-rw-r--r--test/CodeGen/Mips/abiflags-xx.ll6
-rw-r--r--test/CodeGen/Mips/abiflags32.ll12
-rw-r--r--test/CodeGen/Mips/analyzebranch.ll35
-rw-r--r--test/CodeGen/Mips/atomic.ll626
-rw-r--r--test/CodeGen/Mips/atomicops.ll3
-rw-r--r--test/CodeGen/Mips/buildpairextractelementf64.ll4
-rw-r--r--test/CodeGen/Mips/cconv/callee-saved-fpxx.ll58
-rw-r--r--test/CodeGen/Mips/cconv/callee-saved-fpxx1.ll24
-rw-r--r--test/CodeGen/Mips/cmov.ll614
-rw-r--r--test/CodeGen/Mips/countleading.ll90
-rw-r--r--test/CodeGen/Mips/divrem.ll363
-rw-r--r--test/CodeGen/Mips/dsp-r1.ll2
-rw-r--r--test/CodeGen/Mips/eh-return32.ll14
-rw-r--r--test/CodeGen/Mips/eh-return64.ll17
-rw-r--r--test/CodeGen/Mips/ehframe-indirect.ll34
-rw-r--r--test/CodeGen/Mips/fcmp.ll783
-rw-r--r--test/CodeGen/Mips/fcopysign.ll2
-rw-r--r--test/CodeGen/Mips/fmadd1.ll324
-rw-r--r--test/CodeGen/Mips/fp-indexed-ls.ll192
-rw-r--r--test/CodeGen/Mips/fpbr.ll93
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll3
-rw-r--r--test/CodeGen/Mips/lit.local.cfg3
-rw-r--r--test/CodeGen/Mips/llvm-ir/call.ll166
-rw-r--r--test/CodeGen/Mips/llvm-ir/indirectbr.ll34
-rw-r--r--test/CodeGen/Mips/llvm-ir/ret.ll205
-rw-r--r--test/CodeGen/Mips/longbranch.ll34
-rw-r--r--test/CodeGen/Mips/madd-msub.ll241
-rw-r--r--test/CodeGen/Mips/mips16ex.ll4
-rw-r--r--test/CodeGen/Mips/mips64-f128.ll358
-rw-r--r--test/CodeGen/Mips/mips64-fp-indexed-ls.ll110
-rw-r--r--test/CodeGen/Mips/mips64countleading.ll24
-rw-r--r--test/CodeGen/Mips/mips64instrs.ll126
-rw-r--r--test/CodeGen/Mips/mips64muldiv.ll57
-rw-r--r--test/CodeGen/Mips/mno-ldc1-sdc1.ll274
-rw-r--r--test/CodeGen/Mips/msa/special.ll4
-rw-r--r--test/CodeGen/Mips/no-odd-spreg.ll54
-rw-r--r--test/CodeGen/Mips/null-streamer.ll7
-rw-r--r--test/CodeGen/Mips/prevent-hoisting.ll144
-rw-r--r--test/CodeGen/Mips/select.ll800
-rw-r--r--test/CodeGen/Mips/selectcc.ll14
-rw-r--r--test/CodeGen/Mips/tls-alias.ll2
-rw-r--r--test/CodeGen/Mips/zeroreg.ll98
-rw-r--r--test/CodeGen/NVPTX/access-non-generic.ll8
-rw-r--r--test/CodeGen/NVPTX/arg-lowering.ll13
-rw-r--r--test/CodeGen/NVPTX/atomics.ll141
-rw-r--r--test/CodeGen/NVPTX/bfe.ll32
-rw-r--r--test/CodeGen/NVPTX/envreg.ll139
-rw-r--r--test/CodeGen/NVPTX/gvar-init.ll5
-rw-r--r--test/CodeGen/NVPTX/imad.ll9
-rw-r--r--test/CodeGen/NVPTX/inline-asm.ll7
-rw-r--r--test/CodeGen/NVPTX/isspacep.ll35
-rw-r--r--test/CodeGen/NVPTX/ldu-i8.ll6
-rw-r--r--test/CodeGen/NVPTX/ldu-ldg.ll40
-rw-r--r--test/CodeGen/NVPTX/ldu-reg-plus-offset.ll6
-rw-r--r--test/CodeGen/NVPTX/lit.local.cfg3
-rw-r--r--test/CodeGen/NVPTX/managed.ll11
-rw-r--r--test/CodeGen/NVPTX/mulwide.ll37
-rw-r--r--test/CodeGen/NVPTX/nvvm-reflect.ll16
-rw-r--r--test/CodeGen/NVPTX/rotate.ll58
-rw-r--r--test/CodeGen/NVPTX/shift-parts.ll38
-rw-r--r--test/CodeGen/NVPTX/weak-global.ll9
-rw-r--r--test/CodeGen/NVPTX/weak-linkage.ll12
-rw-r--r--test/CodeGen/PowerPC/Atomics-32.ll48
-rw-r--r--test/CodeGen/PowerPC/Frames-alloca.ll8
-rw-r--r--test/CodeGen/PowerPC/Frames-large.ll16
-rw-r--r--test/CodeGen/PowerPC/Frames-small.ll16
-rw-r--r--test/CodeGen/PowerPC/atomic-1.ll3
-rw-r--r--test/CodeGen/PowerPC/atomic-2.ll3
-rw-r--r--test/CodeGen/PowerPC/early-ret2.ll2
-rw-r--r--test/CodeGen/PowerPC/fast-isel-conversion-p5.ll23
-rw-r--r--test/CodeGen/PowerPC/fast-isel-conversion.ll104
-rw-r--r--test/CodeGen/PowerPC/func-addr.ll17
-rw-r--r--test/CodeGen/PowerPC/hello-reloc.s18
-rw-r--r--test/CodeGen/PowerPC/lit.local.cfg3
-rw-r--r--test/CodeGen/PowerPC/ppc64-altivec-abi.ll25
-rw-r--r--test/CodeGen/PowerPC/ppc64-byval-align.ll56
-rw-r--r--test/CodeGen/PowerPC/ppc64-calls.ll12
-rw-r--r--test/CodeGen/PowerPC/ppc64-smallarg.ll59
-rw-r--r--test/CodeGen/PowerPC/ppc64le-smallarg.ll59
-rw-r--r--test/CodeGen/PowerPC/ppcf128-endian.ll154
-rw-r--r--test/CodeGen/PowerPC/resolvefi-basereg.ll362
-rw-r--r--test/CodeGen/PowerPC/svr4-redzone.ll2
-rw-r--r--test/CodeGen/PowerPC/vec_cmp.ll8
-rw-r--r--test/CodeGen/PowerPC/vec_misaligned.ll10
-rw-r--r--test/CodeGen/PowerPC/vec_mul.ll17
-rw-r--r--test/CodeGen/PowerPC/vec_shuffle_le.ll191
-rw-r--r--test/CodeGen/PowerPC/vperm-instcombine.ll17
-rw-r--r--test/CodeGen/PowerPC/vperm-lowering.ll66
-rw-r--r--test/CodeGen/R600/add_i64.ll6
-rw-r--r--test/CodeGen/R600/and.ll106
-rw-r--r--test/CodeGen/R600/array-ptr-calc-i32.ll7
-rw-r--r--test/CodeGen/R600/atomic_cmp_swap_local.ll37
-rw-r--r--test/CodeGen/R600/atomic_load_add.ll45
-rw-r--r--test/CodeGen/R600/atomic_load_sub.ll45
-rw-r--r--test/CodeGen/R600/big_alu.ll12
-rw-r--r--test/CodeGen/R600/bitcast.ll66
-rw-r--r--test/CodeGen/R600/bswap.ll50
-rw-r--r--test/CodeGen/R600/ctlz_zero_undef.ll57
-rw-r--r--test/CodeGen/R600/ctpop.ll284
-rw-r--r--test/CodeGen/R600/ctpop64.ll122
-rw-r--r--test/CodeGen/R600/cttz_zero_undef.ll57
-rw-r--r--test/CodeGen/R600/cvt_f32_ubyte.ll171
-rw-r--r--test/CodeGen/R600/default-fp-mode.ll10
-rw-r--r--test/CodeGen/R600/fceil.ll181
-rw-r--r--test/CodeGen/R600/fceil64.ll103
-rw-r--r--test/CodeGen/R600/fcopysign.f32.ll50
-rw-r--r--test/CodeGen/R600/fcopysign.f64.ll37
-rw-r--r--test/CodeGen/R600/ffloor.ll34
-rw-r--r--test/CodeGen/R600/fma.ll72
-rw-r--r--test/CodeGen/R600/fnearbyint.ll57
-rw-r--r--test/CodeGen/R600/fp16_to_fp32.ll14
-rw-r--r--test/CodeGen/R600/fp32_to_fp16.ll14
-rw-r--r--test/CodeGen/R600/fp_to_sint_i64.ll12
-rw-r--r--test/CodeGen/R600/fsub64.ll7
-rw-r--r--test/CodeGen/R600/ftrunc.ll165
-rw-r--r--test/CodeGen/R600/gv-const-addrspace.ll6
-rw-r--r--test/CodeGen/R600/indirect-private-64.ll36
-rw-r--r--test/CodeGen/R600/input-mods.ll26
-rw-r--r--test/CodeGen/R600/large-alloca.ll14
-rw-r--r--test/CodeGen/R600/large-constant-initializer.ll19
-rw-r--r--test/CodeGen/R600/lds-output-queue.ll2
-rw-r--r--test/CodeGen/R600/lds-size.ll2
-rw-r--r--test/CodeGen/R600/lit.local.cfg3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.abs.ll48
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.brev.ll27
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.clamp.ll28
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll42
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll27
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll27
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_scale.ll48
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.fract.ll27
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll13
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rcp.ll58
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll11
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll14
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.ll13
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll29
-rw-r--r--test/CodeGen/R600/llvm.SI.gather4.ll508
-rw-r--r--test/CodeGen/R600/llvm.SI.getlod.ll44
-rw-r--r--test/CodeGen/R600/llvm.exp2.ll93
-rw-r--r--test/CodeGen/R600/llvm.log2.ll79
-rw-r--r--test/CodeGen/R600/llvm.rint.f64.ll20
-rw-r--r--test/CodeGen/R600/llvm.rint.ll32
-rw-r--r--test/CodeGen/R600/load.ll3
-rw-r--r--test/CodeGen/R600/local-atomics.ll254
-rw-r--r--test/CodeGen/R600/local-atomics64.ll251
-rw-r--r--test/CodeGen/R600/local-memory-two-objects.ll4
-rw-r--r--test/CodeGen/R600/local-memory.ll2
-rw-r--r--test/CodeGen/R600/mul.ll42
-rw-r--r--test/CodeGen/R600/no-initializer-constant-addrspace.ll20
-rw-r--r--test/CodeGen/R600/or.ll6
-rw-r--r--test/CodeGen/R600/parallelandifcollapse.ll6
-rw-r--r--test/CodeGen/R600/parallelorifcollapse.ll5
-rw-r--r--test/CodeGen/R600/private-memory-atomics.ll31
-rw-r--r--test/CodeGen/R600/private-memory-broken.ll20
-rw-r--r--test/CodeGen/R600/private-memory.ll119
-rw-r--r--test/CodeGen/R600/pv.ll4
-rw-r--r--test/CodeGen/R600/reorder-stores.ll104
-rw-r--r--test/CodeGen/R600/rotl.i64.ll34
-rw-r--r--test/CodeGen/R600/rotl.ll54
-rw-r--r--test/CodeGen/R600/rotr.i64.ll58
-rw-r--r--test/CodeGen/R600/rotr.ll67
-rw-r--r--test/CodeGen/R600/rsq.ll26
-rw-r--r--test/CodeGen/R600/saddo.ll62
-rw-r--r--test/CodeGen/R600/scalar_to_vector.ll80
-rw-r--r--test/CodeGen/R600/sdiv.ll90
-rw-r--r--test/CodeGen/R600/setcc-equivalent.ll1
-rw-r--r--test/CodeGen/R600/sgpr-copy.ll4
-rw-r--r--test/CodeGen/R600/shl.ll117
-rw-r--r--test/CodeGen/R600/si-sgpr-spill.ll18
-rw-r--r--test/CodeGen/R600/sign_extend.ll63
-rw-r--r--test/CodeGen/R600/simplify-demanded-bits-build-pair.ll7
-rw-r--r--test/CodeGen/R600/sint_to_fp.ll22
-rw-r--r--test/CodeGen/R600/sint_to_fp64.ll32
-rw-r--r--test/CodeGen/R600/sra.ll130
-rw-r--r--test/CodeGen/R600/srem.ll50
-rw-r--r--test/CodeGen/R600/srl.ll126
-rw-r--r--test/CodeGen/R600/ssubo.ll64
-rw-r--r--test/CodeGen/R600/store.ll3
-rw-r--r--test/CodeGen/R600/sub.ll50
-rw-r--r--test/CodeGen/R600/uaddo.ll56
-rw-r--r--test/CodeGen/R600/udivrem.ll358
-rw-r--r--test/CodeGen/R600/uint_to_fp.f64.ll27
-rw-r--r--test/CodeGen/R600/uint_to_fp.ll76
-rw-r--r--test/CodeGen/R600/usubo.ll66
-rw-r--r--test/CodeGen/R600/vector-alloca.ll74
-rw-r--r--test/CodeGen/R600/xor.ll66
-rw-r--r--test/CodeGen/SPARC/atomics.ll6
-rw-r--r--test/CodeGen/SPARC/lit.local.cfg3
-rw-r--r--test/CodeGen/SystemZ/Large/lit.local.cfg3
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-01.ll6
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-02.ll6
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-03.ll36
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-04.ll27
-rw-r--r--test/CodeGen/SystemZ/lit.local.cfg3
-rw-r--r--test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll18
-rw-r--r--test/CodeGen/Thumb/dyn-stackalloc.ll7
-rw-r--r--test/CodeGen/Thumb/fastcc.ll36
-rw-r--r--test/CodeGen/Thumb/lit.local.cfg3
-rw-r--r--test/CodeGen/Thumb/thumb-ldm.ll1
-rw-r--r--test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll1
-rw-r--r--test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll2
-rw-r--r--test/CodeGen/Thumb2/buildvector-crash.ll2
-rw-r--r--test/CodeGen/Thumb2/cross-rc-coalescing-2.ll2
-rw-r--r--test/CodeGen/Thumb2/ldr-str-imm12.ll2
-rw-r--r--test/CodeGen/Thumb2/lit.local.cfg3
-rw-r--r--test/CodeGen/Thumb2/thumb2-branch.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-cbnz.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt2.ll6
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt3.ll6
-rw-r--r--test/CodeGen/Thumb2/thumb2-spill-q.ll2
-rw-r--r--test/CodeGen/Thumb2/tpsoft.ll54
-rw-r--r--test/CodeGen/Thumb2/v8_IT_3.ll8
-rw-r--r--test/CodeGen/Thumb2/v8_IT_5.ll4
-rw-r--r--test/CodeGen/X86/2007-05-05-Personality.ll6
-rw-r--r--test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll30
-rw-r--r--test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll2
-rw-r--r--test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll4
-rw-r--r--test/CodeGen/X86/2010-01-08-Atomic64Bug.ll4
-rw-r--r--test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll2
-rw-r--r--test/CodeGen/X86/2010-10-08-cmpxchg8b.ll3
-rw-r--r--test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll2
-rw-r--r--test/CodeGen/X86/2012-11-30-misched-dbg.ll18
-rw-r--r--test/CodeGen/X86/2014-05-29-factorial.ll24
-rw-r--r--test/CodeGen/X86/2014-05-30-CombineAddNSW.ll20
-rw-r--r--test/CodeGen/X86/Atomics-64.ll62
-rw-r--r--test/CodeGen/X86/GC/lit.local.cfg3
-rw-r--r--test/CodeGen/X86/aliases.ll33
-rw-r--r--test/CodeGen/X86/atom-fixup-lea4.ll23
-rw-r--r--test/CodeGen/X86/atomic-load-store-wide.ll2
-rw-r--r--test/CodeGen/X86/atomic-minmax-i6432.ll65
-rw-r--r--test/CodeGen/X86/atomic-ops-ancient-64.ll43
-rw-r--r--test/CodeGen/X86/atomic128.ll316
-rw-r--r--test/CodeGen/X86/atomic16.ll77
-rw-r--r--test/CodeGen/X86/atomic32.ll80
-rw-r--r--test/CodeGen/X86/atomic64.ll41
-rw-r--r--test/CodeGen/X86/atomic6432.ll92
-rw-r--r--test/CodeGen/X86/atomic8.ll79
-rw-r--r--test/CodeGen/X86/atomic_op.ll21
-rw-r--r--test/CodeGen/X86/avx-blend.ll44
-rw-r--r--test/CodeGen/X86/avx-intel-ocl.ll62
-rw-r--r--test/CodeGen/X86/avx-intrinsics-x86.ll24
-rw-r--r--test/CodeGen/X86/avx-shuffle.ll8
-rw-r--r--test/CodeGen/X86/avx-splat.ll9
-rw-r--r--test/CodeGen/X86/avx-vperm2f128.ll2
-rw-r--r--test/CodeGen/X86/avx-vshufp.ll10
-rw-r--r--test/CodeGen/X86/avx2-shuffle.ll18
-rw-r--r--test/CodeGen/X86/avx512-cvt.ll8
-rw-r--r--test/CodeGen/X86/avx512-inc-dec.ll13
-rw-r--r--test/CodeGen/X86/avx512-intrinsics.ll68
-rw-r--r--test/CodeGen/X86/avx512-nontemporal.ll19
-rw-r--r--test/CodeGen/X86/avx512-shuffle.ll62
-rw-r--r--test/CodeGen/X86/bswap-vector.ll29
-rw-r--r--test/CodeGen/X86/cmp.ll13
-rw-r--r--test/CodeGen/X86/cmpxchg-i1.ll87
-rw-r--r--test/CodeGen/X86/cmpxchg-i128-i1.ll83
-rw-r--r--test/CodeGen/X86/coalescer-remat.ll3
-rw-r--r--test/CodeGen/X86/coff-comdat.ll92
-rw-r--r--test/CodeGen/X86/coff-comdat2.ll9
-rw-r--r--test/CodeGen/X86/coff-comdat3.ll8
-rw-r--r--test/CodeGen/X86/combine-64bit-vec-binop.ll273
-rw-r--r--test/CodeGen/X86/combine-or.ll8
-rw-r--r--test/CodeGen/X86/combine-vec-shuffle-2.ll164
-rw-r--r--test/CodeGen/X86/computeKnownBits_urem.ll14
-rw-r--r--test/CodeGen/X86/cvt16.ll64
-rw-r--r--test/CodeGen/X86/dagcombine-and-setcc.ll47
-rw-r--r--test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll169
-rw-r--r--test/CodeGen/X86/dllexport-x86_64.ll2
-rw-r--r--test/CodeGen/X86/elf-comdat.ll11
-rw-r--r--test/CodeGen/X86/elf-comdat2.ll12
-rw-r--r--test/CodeGen/X86/fast-isel-args-fail2.ll10
-rw-r--r--test/CodeGen/X86/fast-isel-args.ll24
-rw-r--r--test/CodeGen/X86/fast-isel-branch_weights.ll19
-rw-r--r--test/CodeGen/X86/fast-isel-cmp-branch2.ll294
-rw-r--r--test/CodeGen/X86/fast-isel-cmp-branch3.ll470
-rw-r--r--test/CodeGen/X86/fast-isel-cmp.ll689
-rw-r--r--test/CodeGen/X86/fast-isel-fold-mem.ll12
-rw-r--r--test/CodeGen/X86/fast-isel-select-cmov.ll62
-rw-r--r--test/CodeGen/X86/fast-isel-select-cmov2.ll255
-rw-r--r--test/CodeGen/X86/fast-isel-select-cmp.ll50
-rw-r--r--test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll138
-rw-r--r--test/CodeGen/X86/fast-isel-select-sse.ll391
-rw-r--r--test/CodeGen/X86/fast-isel-select.ll4
-rw-r--r--test/CodeGen/X86/fast-isel-sse12-fptoint.ll54
-rw-r--r--test/CodeGen/X86/float-asmprint.ll5
-rw-r--r--test/CodeGen/X86/frameaddr.ll44
-rw-r--r--test/CodeGen/X86/gcc_except_table.ll10
-rw-r--r--test/CodeGen/X86/haddsub-2.ll802
-rw-r--r--test/CodeGen/X86/haddsub-undef.ll325
-rw-r--r--test/CodeGen/X86/i8-umulo.ll24
-rw-r--r--test/CodeGen/X86/jump_table_alias.ll33
-rw-r--r--test/CodeGen/X86/jump_table_bitcast.ll46
-rw-r--r--test/CodeGen/X86/jump_tables.ll272
-rw-r--r--test/CodeGen/X86/libcall-sret.ll28
-rw-r--r--test/CodeGen/X86/lit.local.cfg3
-rw-r--r--test/CodeGen/X86/lower-bitcast.ll103
-rw-r--r--test/CodeGen/X86/macho-comdat.ll6
-rw-r--r--test/CodeGen/X86/null-streamer.ll18
-rw-r--r--test/CodeGen/X86/pr20020.ll73
-rw-r--r--test/CodeGen/X86/pr20088.ll9
-rw-r--r--test/CodeGen/X86/pr5145.ll16
-rw-r--r--test/CodeGen/X86/pshufd-combine-crash.ll14
-rw-r--r--test/CodeGen/X86/rdpmc.ll22
-rw-r--r--test/CodeGen/X86/shift-parts.ll4
-rw-r--r--test/CodeGen/X86/sqrt.ll26
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-x86.ll27
-rw-r--r--test/CodeGen/X86/sse3-avx-addsub-2.ll318
-rw-r--r--test/CodeGen/X86/sse3-avx-addsub.ll296
-rw-r--r--test/CodeGen/X86/sse41-blend.ll18
-rw-r--r--test/CodeGen/X86/sse41.ll22
-rw-r--r--test/CodeGen/X86/stackmap-fast-isel.ll165
-rw-r--r--test/CodeGen/X86/stackmap-liveness.ll153
-rw-r--r--test/CodeGen/X86/swizzle-2.ll515
-rw-r--r--test/CodeGen/X86/swizzle-avx2.ll91
-rw-r--r--test/CodeGen/X86/testb-je-fusion.ll20
-rw-r--r--test/CodeGen/X86/vec_cast2.ll27
-rw-r--r--test/CodeGen/X86/vec_splat.ll20
-rw-r--r--test/CodeGen/X86/vec_split.ll33
-rw-r--r--test/CodeGen/X86/vector-gep.ll2
-rw-r--r--test/CodeGen/X86/vector-idiv.ll42
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v16.ll196
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v2.ll219
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v4.ll170
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v8.ll493
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining.ll119
-rw-r--r--test/CodeGen/X86/vselect.ll14
-rw-r--r--test/CodeGen/X86/widen_cast-4.ll25
-rw-r--r--test/CodeGen/X86/widen_cast-6.ll6
-rw-r--r--test/CodeGen/X86/widen_conversions.ll18
-rw-r--r--test/CodeGen/X86/widen_shuffle-1.ll4
-rw-r--r--test/CodeGen/X86/win64_eh.ll170
-rw-r--r--test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll6
-rw-r--r--test/CodeGen/X86/x86-64-frameaddr.ll15
-rw-r--r--test/CodeGen/X86/x86-64-static-relo-movl.ll24
-rw-r--r--test/CodeGen/X86/x86-frameaddr.ll9
-rw-r--r--test/CodeGen/X86/x86-frameaddr2.ll9
-rw-r--r--test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll41
-rw-r--r--test/CodeGen/X86/xaluo.ll743
-rw-r--r--test/CodeGen/XCore/dwarf_debug.ll39
-rw-r--r--test/CodeGen/XCore/lit.local.cfg3
473 files changed, 26034 insertions, 2490 deletions
diff --git a/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll b/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
new file mode 100644
index 0000000..2df9c37
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
@@ -0,0 +1,55 @@
+; RUN: llc -O3 -mcpu=cortex-a53 -mtriple=aarch64--linux-gnu %s -o - | FileCheck %s
+; PR20188: don't crash when merging sexts.
+
+; CHECK: foo:
+define void @foo() unnamed_addr align 2 {
+entry:
+ br label %invoke.cont145
+
+invoke.cont145:
+ %or.cond = and i1 undef, false
+ br i1 %or.cond, label %if.then274, label %invoke.cont145
+
+if.then274:
+ %0 = load i32* null, align 4
+ br i1 undef, label %invoke.cont291, label %if.else313
+
+invoke.cont291:
+ %idxprom.i.i.i605 = sext i32 %0 to i64
+ %arrayidx.i.i.i607 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i605
+ %idxprom.i.i.i596 = sext i32 %0 to i64
+ %arrayidx.i.i.i598 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i596
+ br label %if.end356
+
+if.else313:
+ %cmp314 = fcmp olt double undef, 0.000000e+00
+ br i1 %cmp314, label %invoke.cont317, label %invoke.cont353
+
+invoke.cont317:
+ br i1 undef, label %invoke.cont326, label %invoke.cont334
+
+invoke.cont326:
+ %idxprom.i.i.i587 = sext i32 %0 to i64
+ %arrayidx.i.i.i589 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i587
+ %sub329 = fsub fast double undef, undef
+ br label %invoke.cont334
+
+invoke.cont334:
+ %lo.1 = phi double [ %sub329, %invoke.cont326 ], [ undef, %invoke.cont317 ]
+ br i1 undef, label %invoke.cont342, label %if.end356
+
+invoke.cont342:
+ %idxprom.i.i.i578 = sext i32 %0 to i64
+ %arrayidx.i.i.i580 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i578
+ br label %if.end356
+
+invoke.cont353:
+ %idxprom.i.i.i572 = sext i32 %0 to i64
+ %arrayidx.i.i.i574 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i572
+ br label %if.end356
+
+if.end356:
+ %lo.2 = phi double [ 0.000000e+00, %invoke.cont291 ], [ %lo.1, %invoke.cont342 ], [ undef, %invoke.cont353 ], [ %lo.1, %invoke.cont334 ]
+ call void null(i32 %0, double %lo.2)
+ unreachable
+}
diff --git a/test/CodeGen/AArch64/aarch64-address-type-promotion.ll b/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
new file mode 100644
index 0000000..ee90d19
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -o - | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
+target triple = "arm64-apple-macosx10.9"
+
+; Check that sexts get promoted above adds.
+define void @foo(i32* nocapture %a, i32 %i) {
+entry:
+; CHECK-LABEL: _foo:
+; CHECK: add
+; CHECK-NEXT: ldp
+; CHECK-NEXT: add
+; CHECK-NEXT: str
+; CHECK-NEXT: ret
+ %add = add nsw i32 %i, 1
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32* %a, i64 %idxprom
+ %0 = load i32* %arrayidx, align 4
+ %add1 = add nsw i32 %i, 2
+ %idxprom2 = sext i32 %add1 to i64
+ %arrayidx3 = getelementptr inbounds i32* %a, i64 %idxprom2
+ %1 = load i32* %arrayidx3, align 4
+ %add4 = add nsw i32 %1, %0
+ %idxprom5 = sext i32 %i to i64
+ %arrayidx6 = getelementptr inbounds i32* %a, i64 %idxprom5
+ store i32 %add4, i32* %arrayidx6, align 4
+ ret void
+}
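The promotion being checked hoists the sext above the nsw add, so both loads share one sign-extended base and can be merged into the single ldp the CHECK lines expect. A sketch of the rewrite for the first index (illustrative, not compiler output; the nsw flag is what makes the reassociation legal):

  ; before: sext of the i32 add
  %idxprom = sext i32 %add to i64
  ; after: one sext of %i, with the +1 adjustment redone in i64
  %base = sext i32 %i to i64
  %idxprom.new = add nsw i64 %base, 1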
diff --git a/test/CodeGen/AArch64/addsub_ext.ll b/test/CodeGen/AArch64/addsub_ext.ll
index a2266b1..ceea8a0 100644
--- a/test/CodeGen/AArch64/addsub_ext.ll
+++ b/test/CodeGen/AArch64/addsub_ext.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s
@var8 = global i8 0
@var16 = global i16 0
diff --git a/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll b/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
index d1840d3..7da2d2c 100644
--- a/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
+++ b/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
@@ -2,14 +2,14 @@
; RUN: llc -mtriple=arm64-linux-gnu -relocation-model=pic < %s | FileCheck %s --check-prefix=CHECK-LINUX
; <rdar://problem/11392109>
-define hidden void @t() optsize ssp {
+define hidden void @t(i64* %addr) optsize ssp {
entry:
- store i64 zext (i32 ptrtoint (i64 (i32)* @x to i32) to i64), i64* undef, align 8
+ store i64 zext (i32 ptrtoint (i64 (i32)* @x to i32) to i64), i64* %addr, align 8
; CHECK: adrp x{{[0-9]+}}, _x@GOTPAGE
; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, _x@GOTPAGEOFF]
; CHECK-NEXT: and x{{[0-9]+}}, x{{[0-9]+}}, #0xffffffff
; CHECK-NEXT: str x{{[0-9]+}}, [x{{[0-9]+}}]
- unreachable
+ ret void
}
declare i64 @x(i32) optsize
diff --git a/test/CodeGen/AArch64/arm64-2014-04-16-AnInfiniteLoopInDAGCombine.ll b/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
index a73b707..a73b707 100644
--- a/test/CodeGen/AArch64/arm64-2014-04-16-AnInfiniteLoopInDAGCombine.ll
+++ b/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
diff --git a/test/CodeGen/AArch64/arm64-2014-04-29-EXT-undef-mask.ll b/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
index 1b2d543..1b2d543 100644
--- a/test/CodeGen/AArch64/arm64-2014-04-29-EXT-undef-mask.ll
+++ b/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
diff --git a/test/CodeGen/AArch64/arm64-aapcs.ll b/test/CodeGen/AArch64/arm64-aapcs.ll
index b713f0d..ccf1371 100644
--- a/test/CodeGen/AArch64/arm64-aapcs.ll
+++ b/test/CodeGen/AArch64/arm64-aapcs.ll
@@ -101,3 +101,11 @@ define fp128 @test_fp128([8 x float] %arg0, fp128 %arg1) {
; CHECK: ldr {{q[0-9]+}}, [sp]
ret fp128 %arg1
}
+
+; Check that vector (VPR) arguments can be correctly passed on the stack.
+define <2 x double> @test_vreg_stack([8 x <2 x double>], <2 x double> %varg_stack) {
+entry:
+; CHECK-LABEL: test_vreg_stack:
+; CHECK: ldr {{q[0-9]+}}, [sp]
+ ret <2 x double> %varg_stack;
+}
diff --git a/test/CodeGen/AArch64/arm64-abi.ll b/test/CodeGen/AArch64/arm64-abi.ll
index e2de434..a955029 100644
--- a/test/CodeGen/AArch64/arm64-abi.ll
+++ b/test/CodeGen/AArch64/arm64-abi.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
+; RUN: llc < %s -debug -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
; RUN: llc < %s -O0 | FileCheck -check-prefix=FAST %s
+; REQUIRES: asserts
target triple = "arm64-apple-darwin"
; rdar://9932559
@@ -8,15 +9,15 @@ entry:
; CHECK-LABEL: i8i16callee:
; The 8th, 9th, 10th and 11th arguments are passed at sp, sp+2, sp+4, sp+5.
; They are i8, i16, i8 and i8.
-; CHECK: ldrsb {{w[0-9]+}}, [sp, #5]
-; CHECK: ldrsh {{w[0-9]+}}, [sp, #2]
-; CHECK: ldrsb {{w[0-9]+}}, [sp]
-; CHECK: ldrsb {{w[0-9]+}}, [sp, #4]
+; CHECK-DAG: ldrsb {{w[0-9]+}}, [sp, #5]
+; CHECK-DAG: ldrsb {{w[0-9]+}}, [sp, #4]
+; CHECK-DAG: ldrsh {{w[0-9]+}}, [sp, #2]
+; CHECK-DAG: ldrsb {{w[0-9]+}}, [sp]
; FAST-LABEL: i8i16callee:
-; FAST: ldrb {{w[0-9]+}}, [sp, #5]
-; FAST: ldrb {{w[0-9]+}}, [sp, #4]
-; FAST: ldrh {{w[0-9]+}}, [sp, #2]
-; FAST: ldrb {{w[0-9]+}}, [sp]
+; FAST-DAG: ldrsb {{w[0-9]+}}, [sp, #5]
+; FAST-DAG: ldrsb {{w[0-9]+}}, [sp, #4]
+; FAST-DAG: ldrsh {{w[0-9]+}}, [sp, #2]
+; FAST-DAG: ldrsb {{w[0-9]+}}, [sp]
%conv = sext i8 %a4 to i64
%conv3 = sext i16 %a5 to i64
%conv8 = sext i8 %b1 to i64
@@ -44,10 +45,10 @@ entry:
; CHECK: i8i16caller
; The 8th, 9th, 10th and 11th arguments are passed at sp, sp+2, sp+4, sp+5.
; They are i8, i16, i8 and i8.
-; CHECK: strb {{w[0-9]+}}, [sp, #5]
-; CHECK: strb {{w[0-9]+}}, [sp, #4]
-; CHECK: strh {{w[0-9]+}}, [sp, #2]
-; CHECK: strb {{w[0-9]+}}, [sp]
+; CHECK-DAG: strb {{w[0-9]+}}, [sp, #5]
+; CHECK-DAG: strb {{w[0-9]+}}, [sp, #4]
+; CHECK-DAG: strh {{w[0-9]+}}, [sp, #2]
+; CHECK-DAG: strb {{w[0-9]+}}, [sp]
; CHECK: bl
; FAST: i8i16caller
; FAST: strb {{w[0-9]+}}, [sp]
diff --git a/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll b/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll
index 34d6287..38661a5 100644
--- a/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll
+++ b/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -o - | FileCheck %s
+; RUN: llc %s -o - -aarch64-atomic-cfg-tidy=0 | FileCheck %s
; Check that ANDS (tst) is not merged with ADD when the immediate
; is not 0.
; <rdar://problem/16693089>
@@ -8,18 +8,18 @@ target triple = "arm64-apple-ios"
; CHECK-LABEL: tst1:
; CHECK: add [[REG:w[0-9]+]], w{{[0-9]+}}, #1
; CHECK: tst [[REG]], #0x1
-define void @tst1() {
+define void @tst1(i1 %tst, i32 %true) {
entry:
- br i1 undef, label %for.end, label %for.body
+ br i1 %tst, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%result.09 = phi i32 [ %add2.result.0, %for.body ], [ 1, %entry ]
%i.08 = phi i32 [ %inc, %for.body ], [ 2, %entry ]
%and = and i32 %i.08, 1
%cmp1 = icmp eq i32 %and, 0
- %add2.result.0 = select i1 %cmp1, i32 undef, i32 %result.09
+ %add2.result.0 = select i1 %cmp1, i32 %true, i32 %result.09
%inc = add nsw i32 %i.08, 1
- %cmp = icmp slt i32 %i.08, undef
+ %cmp = icmp slt i32 %i.08, %true
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.body
diff --git a/test/CodeGen/AArch64/arm64-arith.ll b/test/CodeGen/AArch64/arm64-arith.ll
index ed9b569..f36e706 100644
--- a/test/CodeGen/AArch64/arm64-arith.ll
+++ b/test/CodeGen/AArch64/arm64-arith.ll
@@ -260,3 +260,11 @@ define i64 @f3(i64 %a) nounwind readnone ssp {
%res = mul nsw i64 %a, 17
ret i64 %res
}
+
+define i32 @f4(i32 %a) nounwind readnone ssp {
+; CHECK-LABEL: f4:
+; CHECK-NEXT: add w0, w0, w0, lsl #1
+; CHECK-NEXT: ret
+ %res = mul i32 %a, 3
+ ret i32 %res
+}
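f3 and f4 both rely on the shift-add strength reduction mul(x, 2^k + 1) = x + (x << k), with 17 = 2^4 + 1 and 3 = 2^1 + 1, which is why a lone add with a shifted operand replaces the mul. IR equivalent of what "add w0, w0, w0, lsl #1" computes (illustrative):

  %shl = shl i32 %a, 1       ; 2*x
  %res = add i32 %a, %shl    ; x + 2*x = 3*x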
diff --git a/test/CodeGen/AArch64/arm64-atomic-128.ll b/test/CodeGen/AArch64/arm64-atomic-128.ll
index 3b43aa1..3377849 100644
--- a/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -13,7 +13,8 @@ define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
; CHECK: stxp [[SCRATCH_RES:w[0-9]+]], x4, x5, [x[[ADDR]]]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
; CHECK: [[DONE]]:
- %val = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+ %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+ %val = extractvalue { i128, i1 } %pair, 0
ret i128 %val
}
@@ -21,8 +22,10 @@ define void @fetch_and_nand(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_nand:
; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
; CHECK: ldxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
-; CHECK-DAG: bic [[SCRATCH_REGLO:x[0-9]+]], [[DEST_REGLO]], x2
-; CHECK-DAG: bic [[SCRATCH_REGHI:x[0-9]+]], [[DEST_REGHI]], x3
+; CHECK-DAG: and [[TMP_REGLO:x[0-9]+]], [[DEST_REGLO]], x2
+; CHECK-DAG: and [[TMP_REGHI:x[0-9]+]], [[DEST_REGHI]], x3
+; CHECK-DAG: mvn [[SCRATCH_REGLO:x[0-9]+]], [[TMP_REGLO]]
+; CHECK-DAG: mvn [[SCRATCH_REGHI:x[0-9]+]], [[TMP_REGHI]]
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
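The fetch_and_nand CHECK churn here and in the next file tracks the corrected semantics of atomicrmw nand, which stores ~(old & val); the old bic-based expansion computed old & ~val instead, hence the switch to and/orr plus mvn. The tests exercise IR of this shape (ordering and operands illustrative):

  %old = atomicrmw nand i128* %p, i128 %bits seq_cst
  ; memory afterwards holds ~(%old & %bits), not %old & ~%bits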
diff --git a/test/CodeGen/AArch64/arm64-atomic.ll b/test/CodeGen/AArch64/arm64-atomic.ll
index aa9b284..b56f91d 100644
--- a/test/CodeGen/AArch64/arm64-atomic.ll
+++ b/test/CodeGen/AArch64/arm64-atomic.ll
@@ -10,7 +10,8 @@ define i32 @val_compare_and_swap(i32* %p) {
; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], [[NEWVAL_REG]], [x0]
; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
; CHECK: [[LABEL2]]:
- %val = cmpxchg i32* %p, i32 7, i32 4 acquire acquire
+ %pair = cmpxchg i32* %p, i32 7, i32 4 acquire acquire
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -25,7 +26,8 @@ define i64 @val_compare_and_swap_64(i64* %p) {
; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], x[[NEWVAL_REG]], [x0]
; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
; CHECK: [[LABEL2]]:
- %val = cmpxchg i64* %p, i64 7, i64 4 monotonic monotonic
+ %pair = cmpxchg i64* %p, i64 7, i64 4 monotonic monotonic
+ %val = extractvalue { i64, i1 } %pair, 0
ret i64 %val
}
@@ -33,7 +35,8 @@ define i32 @fetch_and_nand(i32* %p) {
; CHECK-LABEL: fetch_and_nand:
; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
; CHECK: ldxr w[[DEST_REG:[0-9]+]], [x0]
-; CHECK: and [[SCRATCH2_REG:w[0-9]+]], w[[DEST_REG]], #0xfffffff8
+; CHECK: mvn [[TMP_REG:w[0-9]+]], w[[DEST_REG]]
+; CHECK: orr [[SCRATCH2_REG:w[0-9]+]], [[TMP_REG]], #0xfffffff8
; CHECK-NOT: stlxr [[SCRATCH2_REG]], [[SCRATCH2_REG]]
; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
@@ -46,8 +49,9 @@ define i64 @fetch_and_nand_64(i64* %p) {
; CHECK-LABEL: fetch_and_nand_64:
; CHECK: mov x[[ADDR:[0-9]+]], x0
; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: ldaxr [[DEST_REG:x[0-9]+]], [x[[ADDR]]]
-; CHECK: and [[SCRATCH2_REG:x[0-9]+]], [[DEST_REG]], #0xfffffffffffffff8
+; CHECK: ldaxr x[[DEST_REG:[0-9]+]], [x[[ADDR]]]
+; CHECK: mvn w[[TMP_REG:[0-9]+]], w[[DEST_REG]]
+; CHECK: orr [[SCRATCH2_REG:x[0-9]+]], x[[TMP_REG]], #0xfffffffffffffff8
; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
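The %pair/extractvalue rewrites above follow the IR change this rebase pulls in: cmpxchg now returns a { value, success } struct rather than the bare loaded value, so a test that wants only the value must project it out. A minimal sketch of the new form (names illustrative):

define i32 @cas_value(i32* %p, i32 %old, i32 %new) {
  %pair = cmpxchg i32* %p, i32 %old, i32 %new acquire acquire
  %val = extractvalue { i32, i1 } %pair, 0   ; the value loaded from %p
  ret i32 %val
}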
diff --git a/test/CodeGen/AArch64/arm64-build-vector.ll b/test/CodeGen/AArch64/arm64-build-vector.ll
index c109263..d0f6db0 100644
--- a/test/CodeGen/AArch64/arm64-build-vector.ll
+++ b/test/CodeGen/AArch64/arm64-build-vector.ll
@@ -33,3 +33,27 @@ define <4 x float> @foo(float %a, float %b, float %c, float %d) nounwind {
%4 = insertelement <4 x float> %3, float %d, i32 3
ret <4 x float> %4
}
+
+define <8 x i16> @build_all_zero(<8 x i16> %a) #1 {
+; CHECK-LABEL: build_all_zero:
+; CHECK: movz w[[GREG:[0-9]+]], #0xae80
+; CHECK-NEXT: fmov s[[FREG:[0-9]+]], w[[GREG]]
+; CHECK-NEXT: mul.8h v0, v0, v[[FREG]]
+ %b = add <8 x i16> %a, <i16 -32768, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>
+ %c = mul <8 x i16> %b, <i16 -20864, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>
+ ret <8 x i16> %c
+}
+
+; The DAG combiner performs the following fold:
+; fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
+; -> (BUILD_VECTOR A, B, ..., C, D, ...)
+; This test checks that when A,B and C,D have different types, there is no
+; assertion failure.
+define <8 x i16> @concat_2_build_vector(<4 x i16> %in0) {
+; CHECK-LABEL: concat_2_build_vector:
+; CHECK: movi
+ %vshl_n = shl <4 x i16> %in0, <i16 8, i16 8, i16 8, i16 8>
+ %vshl_n2 = shl <4 x i16> %vshl_n, <i16 9, i16 9, i16 9, i16 9>
+ %shuffle.i = shufflevector <4 x i16> %vshl_n2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle.i
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/arm64-convert-v2f64-v2i32.ll b/test/CodeGen/AArch64/arm64-convert-v2f64-v2i32.ll
deleted file mode 100644
index d862b1e..0000000
--- a/test/CodeGen/AArch64/arm64-convert-v2f64-v2i32.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
-
-; CHECK: fptosi_1
-; CHECK: fcvtzs.2d
-; CHECK: xtn.2s
-; CHECK: ret
-define void @fptosi_1() nounwind noinline ssp {
-entry:
- %0 = fptosi <2 x double> undef to <2 x i32>
- store <2 x i32> %0, <2 x i32>* undef, align 8
- ret void
-}
-
-; CHECK: fptoui_1
-; CHECK: fcvtzu.2d
-; CHECK: xtn.2s
-; CHECK: ret
-define void @fptoui_1() nounwind noinline ssp {
-entry:
- %0 = fptoui <2 x double> undef to <2 x i32>
- store <2 x i32> %0, <2 x i32>* undef, align 8
- ret void
-}
-
diff --git a/test/CodeGen/AArch64/arm64-convert-v2i32-v2f64.ll b/test/CodeGen/AArch64/arm64-convert-v2i32-v2f64.ll
deleted file mode 100644
index daaf1e0..0000000
--- a/test/CodeGen/AArch64/arm64-convert-v2i32-v2f64.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
-
-define <2 x double> @f1(<2 x i32> %v) nounwind readnone {
-; CHECK-LABEL: f1:
-; CHECK: sshll.2d v0, v0, #0
-; CHECK-NEXT: scvtf.2d v0, v0
-; CHECK-NEXT: ret
- %conv = sitofp <2 x i32> %v to <2 x double>
- ret <2 x double> %conv
-}
-define <2 x double> @f2(<2 x i32> %v) nounwind readnone {
-; CHECK-LABEL: f2:
-; CHECK: ushll.2d v0, v0, #0
-; CHECK-NEXT: ucvtf.2d v0, v0
-; CHECK-NEXT: ret
- %conv = uitofp <2 x i32> %v to <2 x double>
- ret <2 x double> %conv
-}
-
-; CHECK: autogen_SD19655
-; CHECK: scvtf
-; CHECK: ret
-define void @autogen_SD19655() {
- %T = load <2 x i64>* undef
- %F = sitofp <2 x i64> undef to <2 x float>
- store <2 x float> %F, <2 x float>* undef
- ret void
-}
-
diff --git a/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/test/CodeGen/AArch64/arm64-convert-v4f64.ll
new file mode 100644
index 0000000..7123e5e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -march=arm64 | FileCheck %s
+
+
+define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) {
+; CHECK: fptosi_v4f64_to_v4i16
+; CHECK-DAG: fcvtzs v[[LHS:[0-9]+]].2d, v1.2d
+; CHECK-DAG: fcvtzs v[[RHS:[0-9]+]].2d, v0.2d
+; CHECK-DAG: xtn v[[LHS_NA:[0-9]+]].2s, v[[LHS]].2d
+; CHECK-DAG: xtn v[[RHS_NA:[0-9]+]].2s, v[[RHS]].2d
+; CHECK: uzp1 v0.4h, v[[RHS_NA]].4h, v[[LHS_NA]].4h
+ %tmp1 = load <4 x double>* %ptr
+ %tmp2 = fptosi <4 x double> %tmp1 to <4 x i16>
+ ret <4 x i16> %tmp2
+}
+
+define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) {
+; CHECK: fptosi_v4f64_to_v4i8
+; CHECK-DAG: fcvtzs v[[CONV3:[0-9]+]].2d, v3.2d
+; CHECK-DAG: fcvtzs v[[CONV2:[0-9]+]].2d, v2.2d
+; CHECK-DAG: fcvtzs v[[CONV1:[0-9]+]].2d, v1.2d
+; CHECK-DAG: fcvtzs v[[CONV0:[0-9]+]].2d, v0.2d
+; CHECK-DAG: xtn v[[NA3:[0-9]+]].2s, v[[CONV3]].2d
+; CHECK-DAG: xtn v[[NA2:[0-9]+]].2s, v[[CONV2]].2d
+; CHECK-DAG: xtn v[[NA1:[0-9]+]].2s, v[[CONV1]].2d
+; CHECK-DAG: xtn v[[NA0:[0-9]+]].2s, v[[CONV0]].2d
+; CHECK-DAG: uzp1 v[[TMP1:[0-9]+]].4h, v[[CONV2]].4h, v[[CONV3]].4h
+; CHECK-DAG: uzp1 v[[TMP2:[0-9]+]].4h, v[[CONV0]].4h, v[[CONV1]].4h
+; CHECK: uzp1 v0.8b, v[[TMP2]].8b, v[[TMP1]].8b
+ %tmp1 = load <8 x double>* %ptr
+ %tmp2 = fptosi <8 x double> %tmp1 to <8 x i8>
+ ret <8 x i8> %tmp2
+}
+
diff --git a/test/CodeGen/AArch64/arm64-cse.ll b/test/CodeGen/AArch64/arm64-cse.ll
index bb14c89..5d62cfe 100644
--- a/test/CodeGen/AArch64/arm64-cse.ll
+++ b/test/CodeGen/AArch64/arm64-cse.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 < %s | FileCheck %s
+; RUN: llc -O3 < %s -aarch64-atomic-cfg-tidy=0 | FileCheck %s
target triple = "arm64-apple-ios"
; rdar://12462006
diff --git a/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll b/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
index 2cf0135..6eed48b 100644
--- a/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
+++ b/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
@@ -1,5 +1,8 @@
; RUN: llc -mcpu=cyclone < %s | FileCheck %s
+; r208640 broke ppc64/Linux self-hosting; xfailing while this is worked on.
+; XFAIL: *
+
target datalayout = "e-i64:64-n32:64-S128"
target triple = "arm64-apple-ios"
diff --git a/test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll b/test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll
index 2e4b658..ce132c6 100644
--- a/test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll
+++ b/test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll
@@ -13,12 +13,12 @@ target triple = "arm64-apple-ios"
; CHECK-LABEL: XX:
; CHECK: ldr
-define void @XX(%class.A* %K) {
+define i32 @XX(%class.A* %K, i1 %tst, i32* %addr, %class.C** %ppC, %class.C* %pC) {
entry:
- br i1 undef, label %if.then, label %lor.rhs.i
+ br i1 %tst, label %if.then, label %lor.rhs.i
lor.rhs.i: ; preds = %entry
- %tmp = load i32* undef, align 4
+ %tmp = load i32* %addr, align 4
%y.i.i.i = getelementptr inbounds %class.A* %K, i64 0, i32 1
%tmp1 = load i64* %y.i.i.i, align 8
%U.sroa.3.8.extract.trunc.i = trunc i64 %tmp1 to i32
@@ -30,17 +30,17 @@ lor.rhs.i: ; preds = %entry
%add16.i = add nsw i32 %add12.i, %div15.i
%rem.i.i = srem i32 %add16.i, %tmp
%idxprom = sext i32 %rem.i.i to i64
- %arrayidx = getelementptr inbounds %class.C** undef, i64 %idxprom
- %tobool533 = icmp eq %class.C* undef, null
+ %arrayidx = getelementptr inbounds %class.C** %ppC, i64 %idxprom
+ %tobool533 = icmp eq %class.C* %pC, null
br i1 %tobool533, label %while.end, label %while.body
if.then: ; preds = %entry
- unreachable
+ ret i32 42
while.body: ; preds = %lor.rhs.i
- unreachable
+ ret i32 5
while.end: ; preds = %lor.rhs.i
%tmp3 = load %class.C** %arrayidx, align 8
- unreachable
+ ret i32 50
}
diff --git a/test/CodeGen/AArch64/arm64-early-ifcvt.ll b/test/CodeGen/AArch64/arm64-early-ifcvt.ll
index 17d783a..44150c2 100644
--- a/test/CodeGen/AArch64/arm64-early-ifcvt.ll
+++ b/test/CodeGen/AArch64/arm64-early-ifcvt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -stress-early-ifcvt | FileCheck %s
+; RUN: llc < %s -stress-early-ifcvt -aarch64-atomic-cfg-tidy=0 | FileCheck %s
target triple = "arm64-apple-macosx"
; CHECK: mm2
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll b/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
index a3d5f6c..1152988 100644
--- a/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
+++ b/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
@@ -133,3 +133,16 @@ define void @t8() {
call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i64 4, i32 1, i1 false)
ret void
}
+
+define void @test_distant_memcpy(i8* %dst) {
+; ARM64-LABEL: test_distant_memcpy:
+; ARM64: mov [[ARRAY:x[0-9]+]], sp
+; ARM64: movz [[OFFSET:x[0-9]+]], #0x1f40
+; ARM64: add x[[ADDR:[0-9]+]], [[ARRAY]], [[OFFSET]]
+; ARM64: ldrb [[BYTE:w[0-9]+]], [x[[ADDR]]]
+; ARM64: strb [[BYTE]], [x0]
+ %array = alloca i8, i32 8192
+ %elem = getelementptr i8* %array, i32 8000
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %elem, i64 1, i32 1, i1 false)
+ ret void
+}
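test_distant_memcpy hinges on an addressing-mode limit: ldrb's scaled 12-bit unsigned immediate tops out at 4095 bytes, and the copied element sits 8000 (0x1f40) bytes into the alloca, so fast-isel must materialize the offset with movz and add it to the base rather than folding it into the load. Schematically (illustrative, not test output):

  ; ldrb w8, [x9, #4095]   largest offset that folds into the load
  ; movz x10, #0x1f40      8000 is out of range, so materialize it,
  ; add  x10, x9, x10      add it to the base,
  ; ldrb w8, [x10]         and load through the computed address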
diff --git a/test/CodeGen/AArch64/arm64-fp128.ll b/test/CodeGen/AArch64/arm64-fp128.ll
index 57bbb93..b1d5010 100644
--- a/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/test/CodeGen/AArch64/arm64-fp128.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
@lhs = global fp128 zeroinitializer, align 16
@rhs = global fp128 zeroinitializer, align 16
diff --git a/test/CodeGen/AArch64/arm64-frame-index.ll b/test/CodeGen/AArch64/arm64-frame-index.ll
index 4a91ff3..321f335 100644
--- a/test/CodeGen/AArch64/arm64-frame-index.ll
+++ b/test/CodeGen/AArch64/arm64-frame-index.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc -march=arm64 -mtriple=arm64-apple-ios -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
; rdar://11935841
define void @t1() nounwind ssp {
diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
index f88bd6a..bc7ed7f 100644
--- a/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
+++ b/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
@@ -122,3 +122,82 @@ define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(i8* %A, i8** %ptr) {
}
declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
+
+; Regression Test for PR20057.
+;
+; Cortex-A53 machine model stalls on A53UnitFPMDS contention. Instructions that
+; are otherwise ready are jammed in the pending queue.
+; CHECK: ********** MI Scheduling **********
+; CHECK: testResourceConflict
+; CHECK: *** Final schedule for BB#0 ***
+; CHECK: BRK
+; CHECK: ********** INTERVALS **********
+define void @testResourceConflict(float* %ptr) {
+entry:
+ %add1 = fadd float undef, undef
+ %mul2 = fmul float undef, undef
+ %add3 = fadd float %mul2, undef
+ %mul4 = fmul float undef, %add3
+ %add5 = fadd float %mul4, undef
+ %sub6 = fsub float 0.000000e+00, undef
+ %sub7 = fsub float %add5, undef
+ %div8 = fdiv float 1.000000e+00, undef
+ %mul9 = fmul float %div8, %sub7
+ %mul14 = fmul float %sub6, %div8
+ %mul10 = fsub float -0.000000e+00, %mul14
+ %mul15 = fmul float undef, %div8
+ %mul11 = fsub float -0.000000e+00, %mul15
+ %mul12 = fmul float 0.000000e+00, %div8
+ %mul13 = fmul float %add1, %mul9
+ %mul21 = fmul float %add5, %mul11
+ %add22 = fadd float %mul13, %mul21
+ store float %add22, float* %ptr, align 4
+ %mul28 = fmul float %add1, %mul10
+ %mul33 = fmul float %add5, %mul12
+ %add34 = fadd float %mul33, %mul28
+ store float %add34, float* %ptr, align 4
+ %mul240 = fmul float undef, %mul9
+ %add246 = fadd float %mul240, undef
+ store float %add246, float* %ptr, align 4
+ %mul52 = fmul float undef, %mul10
+ %mul57 = fmul float undef, %mul12
+ %add58 = fadd float %mul57, %mul52
+ store float %add58, float* %ptr, align 4
+ %mul27 = fmul float 0.000000e+00, %mul9
+ %mul81 = fmul float undef, %mul10
+ %add82 = fadd float %mul27, %mul81
+ store float %add82, float* %ptr, align 4
+ call void @llvm.trap()
+ unreachable
+}
+
+declare void @llvm.trap()
+
+; Regression test for PR20057: "permanent hazard".
+; Resource contention on LDST.
+; CHECK: ********** MI Scheduling **********
+; CHECK: testLdStConflict
+; CHECK: *** Final schedule for BB#1 ***
+; CHECK: LD4Fourv2d
+; CHECK: STRQui
+; CHECK: ********** INTERVALS **********
+define void @testLdStConflict() {
+entry:
+ br label %loop
+
+loop:
+ %0 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i8(i8* null)
+ %ptr = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr, align 4
+ %ptr1 = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr1, align 4
+ %ptr2 = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr2, align 4
+ %ptr3 = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr3, align 4
+ %ptr4 = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr4, align 4
+ br label %loop
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i8(i8*)
diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
new file mode 100644
index 0000000..238474a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
@@ -0,0 +1,112 @@
+; REQUIRES: asserts
+;
+; The Cortex-A57 machine model will avoid scheduling load instructions in
+; succession because loads on the A57 have a latency of 4 cycles and they all
+; issue to the same pipeline. Instead, it will move other instructions between
+; the loads to avoid unnecessary stalls. The generic machine model schedules the
+; 4 loads consecutively for this case, causing stalls.
+;
+; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+; CHECK: ********** MI Scheduling **********
+; CHECK: main:BB#2
+; CHECK: LDR
+; CHECK: Latency : 4
+; CHECK: *** Final schedule for BB#2 ***
+; CHECK: LDR
+; CHECK: LDR
+; CHECK-NOT: LDR
+; CHECK: {{.*}}
+; CHECK: ********** MI Scheduling **********
+
+@main.x = private unnamed_addr constant [8 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 4
+@main.y = private unnamed_addr constant [8 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2], align 4
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ %x = alloca [8 x i32], align 4
+ %y = alloca [8 x i32], align 4
+ %i = alloca i32, align 4
+ %xx = alloca i32, align 4
+ %yy = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = bitcast [8 x i32]* %x to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false)
+ %1 = bitcast [8 x i32]* %y to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false)
+ store i32 0, i32* %xx, align 4
+ store i32 0, i32* %yy, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %2 = load i32* %i, align 4
+ %cmp = icmp slt i32 %2, 8
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %3 = load i32* %yy, align 4
+ %4 = load i32* %i, align 4
+ %idxprom = sext i32 %4 to i64
+ %arrayidx = getelementptr inbounds [8 x i32]* %x, i32 0, i64 %idxprom
+ %5 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %5, 1
+ store i32 %add, i32* %xx, align 4
+ %6 = load i32* %xx, align 4
+ %add1 = add nsw i32 %6, 12
+ store i32 %add1, i32* %xx, align 4
+ %7 = load i32* %xx, align 4
+ %add2 = add nsw i32 %7, 23
+ store i32 %add2, i32* %xx, align 4
+ %8 = load i32* %xx, align 4
+ %add3 = add nsw i32 %8, 34
+ store i32 %add3, i32* %xx, align 4
+ %9 = load i32* %i, align 4
+ %idxprom4 = sext i32 %9 to i64
+ %arrayidx5 = getelementptr inbounds [8 x i32]* %y, i32 0, i64 %idxprom4
+ %10 = load i32* %arrayidx5, align 4
+
+ %add4 = add nsw i32 %9, %add
+ %add5 = add nsw i32 %10, %add1
+ %add6 = add nsw i32 %add4, %add5
+
+ %add7 = add nsw i32 %9, %add3
+ %add8 = add nsw i32 %10, %add4
+ %add9 = add nsw i32 %add7, %add8
+
+ %add10 = add nsw i32 %9, %add6
+ %add11 = add nsw i32 %10, %add7
+ %add12 = add nsw i32 %add10, %add11
+
+ %add13 = add nsw i32 %9, %add9
+ %add14 = add nsw i32 %10, %add10
+ %add15 = add nsw i32 %add13, %add14
+
+ store i32 %add15, i32* %xx, align 4
+
+ %div = sdiv i32 %4, %5
+
+ store i32 %div, i32* %yy, align 4
+
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %11 = load i32* %i, align 4
+ %inc = add nsw i32 %11, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %12 = load i32* %xx, align 4
+ %13 = load i32* %yy, align 4
+ %add67 = add nsw i32 %12, %13
+ ret i32 %add67
+}
+
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll b/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
index 97bfb5c..07373cc 100644
--- a/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
+++ b/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
@@ -6,9 +6,10 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: shiftable
-; CHECK: *** Final schedule for BB#0 ***
-; CHECK: ADDXrr %vreg0, %vreg2
-; CHECK: ADDXrs %vreg0, %vreg2, 5
+; CHECK: SU(2): %vreg2<def> = SUBXri %vreg1, 20, 0
+; CHECK: Successors:
+; CHECK-NEXT: val SU(4): Latency=1 Reg=%vreg2
+; CHECK-NEXT: val SU(3): Latency=2 Reg=%vreg2
; CHECK: ********** INTERVALS **********
define i64 @shiftable(i64 %A, i64 %B) {
%tmp0 = sub i64 %B, 20
diff --git a/test/CodeGen/AArch64/arm64-neon-copy.ll b/test/CodeGen/AArch64/arm64-neon-copy.ll
index cfc2ebf..1cfba82 100644
--- a/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -842,7 +842,7 @@ define <2 x i64> @scalar_to_vector.v2i64(i64 %a) {
define <8 x i8> @testDUP.v1i8(<1 x i8> %a) {
; CHECK-LABEL: testDUP.v1i8:
-; CHECK: dup {{v[0-9]+}}.8b, {{w[0-9]+}}
+; CHECK: dup v0.8b, v0.b[0]
%b = extractelement <1 x i8> %a, i32 0
%c = insertelement <8 x i8> undef, i8 %b, i32 0
%d = insertelement <8 x i8> %c, i8 %b, i32 1
@@ -857,7 +857,7 @@ define <8 x i8> @testDUP.v1i8(<1 x i8> %a) {
define <8 x i16> @testDUP.v1i16(<1 x i16> %a) {
; CHECK-LABEL: testDUP.v1i16:
-; CHECK: dup {{v[0-9]+}}.8h, {{w[0-9]+}}
+; CHECK: dup v0.8h, v0.h[0]
%b = extractelement <1 x i16> %a, i32 0
%c = insertelement <8 x i16> undef, i16 %b, i32 0
%d = insertelement <8 x i16> %c, i16 %b, i32 1
@@ -872,7 +872,7 @@ define <8 x i16> @testDUP.v1i16(<1 x i16> %a) {
define <4 x i32> @testDUP.v1i32(<1 x i32> %a) {
; CHECK-LABEL: testDUP.v1i32:
-; CHECK: dup {{v[0-9]+}}.4s, {{w[0-9]+}}
+; CHECK: dup v0.4s, v0.s[0]
%b = extractelement <1 x i32> %a, i32 0
%c = insertelement <4 x i32> undef, i32 %b, i32 0
%d = insertelement <4 x i32> %c, i32 %b, i32 1
@@ -1411,35 +1411,35 @@ define <16 x i8> @concat_vector_v16i8_const() {
define <4 x i16> @concat_vector_v4i16(<1 x i16> %a) {
; CHECK-LABEL: concat_vector_v4i16:
-; CHECK: dup {{v[0-9]+}}.4h, {{w[0-9]+}}
+; CHECK: dup v0.4h, v0.h[0]
%r = shufflevector <1 x i16> %a, <1 x i16> undef, <4 x i32> zeroinitializer
ret <4 x i16> %r
}
define <4 x i32> @concat_vector_v4i32(<1 x i32> %a) {
; CHECK-LABEL: concat_vector_v4i32:
-; CHECK: dup {{v[0-9]+}}.4s, {{w[0-9]+}}
+; CHECK: dup v0.4s, v0.s[0]
%r = shufflevector <1 x i32> %a, <1 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %r
}
define <8 x i8> @concat_vector_v8i8(<1 x i8> %a) {
; CHECK-LABEL: concat_vector_v8i8:
-; CHECK: dup {{v[0-9]+}}.8b, {{w[0-9]+}}
+; CHECK: dup v0.8b, v0.b[0]
%r = shufflevector <1 x i8> %a, <1 x i8> undef, <8 x i32> zeroinitializer
ret <8 x i8> %r
}
define <8 x i16> @concat_vector_v8i16(<1 x i16> %a) {
; CHECK-LABEL: concat_vector_v8i16:
-; CHECK: dup {{v[0-9]+}}.8h, {{w[0-9]+}}
+; CHECK: dup v0.8h, v0.h[0]
%r = shufflevector <1 x i16> %a, <1 x i16> undef, <8 x i32> zeroinitializer
ret <8 x i16> %r
}
define <16 x i8> @concat_vector_v16i8(<1 x i8> %a) {
; CHECK-LABEL: concat_vector_v16i8:
-; CHECK: dup {{v[0-9]+}}.16b, {{w[0-9]+}}
+; CHECK: dup v0.16b, v0.b[0]
%r = shufflevector <1 x i8> %a, <1 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %r
}
diff --git a/test/CodeGen/AArch64/arm64-neon-select_cc.ll b/test/CodeGen/AArch64/arm64-neon-select_cc.ll
index 255b90d..95c582a 100644
--- a/test/CodeGen/AArch64/arm64-neon-select_cc.ll
+++ b/test/CodeGen/AArch64/arm64-neon-select_cc.ll
@@ -136,8 +136,8 @@ define <2x i64> @test_select_cc_v2i64(i64 %a, i64 %b, <2x i64> %c, <2x i64> %d )
define <1 x float> @test_select_cc_v1f32(float %a, float %b, <1 x float> %c, <1 x float> %d ) {
; CHECK-LABEL: test_select_cc_v1f32:
-; CHECK: fcmp s0, s1
-; CHECK-NEXT: fcsel s0, s2, s3, eq
+; CHECK: fcmeq [[MASK:v[0-9]+]].2s, v0.2s, v1.2s
+; CHECK-NEXT: bsl [[MASK]].8b, v2.8b, v3.8b
%cmp31 = fcmp oeq float %a, %b
%e = select i1 %cmp31, <1 x float> %c, <1 x float> %d
ret <1 x float> %e
diff --git a/test/CodeGen/AArch64/arm64-shrink-v1i64.ll b/test/CodeGen/AArch64/arm64-shrink-v1i64.ll
new file mode 100644
index 0000000..f31a570
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-shrink-v1i64.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=arm64 < %s
+
+; The DAGCombiner tries to do the following shrink:
+; Convert x+y to (VT)((SmallVT)x+(SmallVT)y)
+; But it currently can't handle vector types, and it triggers an assertion
+; failure when it tries to generate an add mixing a vector type and a
+; scalar type.
+; This test checks that no such assertion failure occurs.
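+;
+; As a rough scalar sketch of the shrink (illustrative values only, not part
+; of the test below), with VT = i64 and SmallVT = i32:
+;   %wide = add i64 %a, %b          ; only the low 32 bits are later used
+;   ; would become
+;   %lo.a = trunc i64 %a to i32
+;   %lo.b = trunc i64 %b to i32
+;   %sum  = add i32 %lo.a, %lo.b
+;   %wide = sext i32 %sum to i64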
+define <1 x i64> @dotest(<1 x i64> %in0) {
+entry:
+ %0 = add <1 x i64> %in0, %in0
+ %vshl_n = shl <1 x i64> %0, <i64 32>
+ %vsra_n = ashr <1 x i64> %vshl_n, <i64 32>
+ ret <1 x i64> %vsra_n
+}
diff --git a/test/CodeGen/AArch64/arm64-2014-04-28-sqshl-uqshl-i64Contant.ll b/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
index 3949b85..3949b85 100644
--- a/test/CodeGen/AArch64/arm64-2014-04-28-sqshl-uqshl-i64Contant.ll
+++ b/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
diff --git a/test/CodeGen/AArch64/arm64-vcvt.ll b/test/CodeGen/AArch64/arm64-vcvt.ll
index 8c9e4e9..6570f0e 100644
--- a/test/CodeGen/AArch64/arm64-vcvt.ll
+++ b/test/CodeGen/AArch64/arm64-vcvt.ll
@@ -665,19 +665,19 @@ define <2 x double> @ucvtf_2dc(<2 x i64> %A) nounwind {
;CHECK-LABEL: autogen_SD28458:
;CHECK: fcvt
;CHECK: ret
-define void @autogen_SD28458() {
- %Tr53 = fptrunc <8 x double> undef to <8 x float>
- store <8 x float> %Tr53, <8 x float>* undef
+define void @autogen_SD28458(<8 x double> %val.f64, <8 x float>* %addr.f32) {
+ %Tr53 = fptrunc <8 x double> %val.f64 to <8 x float>
+ store <8 x float> %Tr53, <8 x float>* %addr.f32
ret void
}
;CHECK-LABEL: autogen_SD19225:
;CHECK: fcvt
;CHECK: ret
-define void @autogen_SD19225() {
- %A = load <8 x float>* undef
+define void @autogen_SD19225(<8 x double>* %addr.f64, <8 x float>* %addr.f32) {
+ %A = load <8 x float>* %addr.f32
%Tr53 = fpext <8 x float> %A to <8 x double>
- store <8 x double> %Tr53, <8 x double>* undef
+ store <8 x double> %Tr53, <8 x double>* %addr.f64
ret void
}
diff --git a/test/CodeGen/AArch64/arm64-vshift.ll b/test/CodeGen/AArch64/arm64-vshift.ll
index 82ae486..65bd50c 100644
--- a/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/test/CodeGen/AArch64/arm64-vshift.ll
@@ -1313,6 +1313,15 @@ define <8 x i8> @uqshli8b(<8 x i8>* %A) nounwind {
ret <8 x i8> %tmp3
}
+define <8 x i8> @uqshli8b_1(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: uqshli8b_1:
+;CHECK: movi.8b [[REG:v[0-9]+]], #0x8
+;CHECK: uqshl.8b v0, v0, [[REG]]
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>)
+ ret <8 x i8> %tmp3
+}
+
define <4 x i16> @uqshli4h(<4 x i16>* %A) nounwind {
;CHECK-LABEL: uqshli4h:
;CHECK: uqshl.4h v0, {{v[0-9]+}}, #1
diff --git a/test/CodeGen/AArch64/arm64-xaluo.ll b/test/CodeGen/AArch64/arm64-xaluo.ll
index 6cffbde..0c300de 100644
--- a/test/CodeGen/AArch64/arm64-xaluo.ll
+++ b/test/CodeGen/AArch64/arm64-xaluo.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-atomic-cfg-tidy=0 | FileCheck %s
;
; Get the actual value of the overflow bit.
diff --git a/test/CodeGen/AArch64/atomic-ops.ll b/test/CodeGen/AArch64/atomic-ops.ll
index 58b5d1d..26301b9 100644
--- a/test/CodeGen/AArch64/atomic-ops.ll
+++ b/test/CodeGen/AArch64/atomic-ops.ll
@@ -878,7 +878,9 @@ define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i8:
- %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+ %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+ %old = extractvalue { i8, i1 } %pair, 0
+
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -889,8 +891,7 @@ define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
; function there.
; CHECK-NEXT: cmp w[[OLD]], w0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
- ; As above, w1 is a reasonable guess.
-; CHECK: stxrb [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK: stxrb [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
; CHECK-NOT: dmb
@@ -900,7 +901,9 @@ define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i16:
- %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
+ %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
+ %old = extractvalue { i16, i1 } %pair, 0
+
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -911,8 +914,7 @@ define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
; function there.
; CHECK-NEXT: cmp w[[OLD]], w0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
- ; As above, w1 is a reasonable guess.
-; CHECK: stlxrh [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK: stlxrh [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
; CHECK-NOT: dmb
@@ -922,7 +924,9 @@ define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i32:
- %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
+ %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
+ %old = extractvalue { i32, i1 } %pair, 0
+
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -933,8 +937,7 @@ define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; function there.
; CHECK-NEXT: cmp w[[OLD]], w0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
- ; As above, w1 is a reasonable guess.
-; CHECK: stlxr [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK: stlxr [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
; CHECK-NOT: dmb
@@ -944,7 +947,9 @@ define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i64:
- %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
+ %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
+ %old = extractvalue { i64, i1 } %pair, 0
+
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
diff --git a/test/CodeGen/AArch64/blockaddress.ll b/test/CodeGen/AArch64/blockaddress.ll
index 1eec4cc..3a5dbdc 100644
--- a/test/CodeGen/AArch64/blockaddress.ll
+++ b/test/CodeGen/AArch64/blockaddress.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s
@addr = global i8* null
diff --git a/test/CodeGen/AArch64/branch-relax-asm.ll b/test/CodeGen/AArch64/branch-relax-asm.ll
new file mode 100644
index 0000000..7409c84
--- /dev/null
+++ b/test/CodeGen/AArch64/branch-relax-asm.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=aarch64-apple-ios7.0 -disable-block-placement -aarch64-tbz-offset-bits=4 -o - %s | FileCheck %s
+define i32 @test_asm_length(i32 %in) {
+; CHECK-LABEL: test_asm_length:
+
+ ; It would be more natural to use just one "tbnz %false" here, but if the
+ ; number of instructions in the asm is counted reasonably, that block is out
+ ; of the limited range we gave tbz. So branch relaxation has to invert the
+ ; condition.
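+ ; (Rough arithmetic, assuming the 4 offset bits given above are signed and
+ ; scaled by the 4-byte instruction size: tbz then reaches only about +/-32
+ ; bytes, which the orr, six nops and ret of the true block exceed.)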
+; CHECK: tbz w0, #0, [[TRUE:LBB[0-9]+_[0-9]+]]
+; CHECK: b [[FALSE:LBB[0-9]+_[0-9]+]]
+
+; CHECK: [[TRUE]]:
+; CHECK: orr w0, wzr, #0x4
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+
+; CHECK: [[FALSE]]:
+; CHECK: ret
+
+ %val = and i32 %in, 1
+ %tst = icmp eq i32 %val, 0
+ br i1 %tst, label %true, label %false
+
+true:
+ call void asm sideeffect "nop\0A\09nop\0A\09nop\0A\09nop\0A\09nop\0A\09nop", ""()
+ ret i32 4
+
+false:
+ ret i32 0
+}
diff --git a/test/CodeGen/AArch64/breg.ll b/test/CodeGen/AArch64/breg.ll
index 591f483..9524044 100644
--- a/test/CodeGen/AArch64/breg.ll
+++ b/test/CodeGen/AArch64/breg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s
@stored_label = global i8* null
diff --git a/test/CodeGen/AArch64/cmpxchg-idioms.ll b/test/CodeGen/AArch64/cmpxchg-idioms.ll
new file mode 100644
index 0000000..0c008c2
--- /dev/null
+++ b/test/CodeGen/AArch64/cmpxchg-idioms.ll
@@ -0,0 +1,93 @@
+; RUN: llc -mtriple=aarch64-apple-ios7.0 -o - %s | FileCheck %s
+
+define i32 @test_return(i32* %p, i32 %oldval, i32 %newval) {
+; CHECK-LABEL: test_return:
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxr [[LOADED:w[0-9]+]], [x0]
+; CHECK: cmp [[LOADED]], w1
+; CHECK: b.ne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: stlxr [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x0]
+; CHECK: cbnz [[STATUS]], [[LOOP]]
+
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: orr w0, wzr, #0x1
+; CHECK: ret
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: mov w0, wzr
+; CHECK: ret
+
+ %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %conv = zext i1 %success to i32
+ ret i32 %conv
+}
+
+define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {
+; CHECK-LABEL: test_return_bool:
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxrb [[LOADED:w[0-9]+]], [x0]
+; CHECK: cmp [[LOADED]], w1, uxtb
+; CHECK: b.ne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: stlxrb [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x0]
+; CHECK: cbnz [[STATUS]], [[LOOP]]
+
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+ ; FIXME: DAG combine should be able to deal with this.
+; CHECK: orr [[TMP:w[0-9]+]], wzr, #0x1
+; CHECK: eor w0, [[TMP]], #0x1
+; CHECK: ret
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: mov [[TMP:w[0-9]+]], wzr
+; CHECK: eor w0, [[TMP]], #0x1
+; CHECK: ret
+
+ %pair = cmpxchg i8* %value, i8 %oldValue, i8 %newValue acq_rel monotonic
+ %success = extractvalue { i8, i1 } %pair, 1
+ %failure = xor i1 %success, 1
+ ret i1 %failure
+}
+
+define void @test_conditional(i32* %p, i32 %oldval, i32 %newval) {
+; CHECK-LABEL: test_conditional:
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxr [[LOADED:w[0-9]+]], [x0]
+; CHECK: cmp [[LOADED]], w1
+; CHECK: b.ne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: stlxr [[STATUS:w[0-9]+]], w2, [x0]
+; CHECK: cbnz [[STATUS]], [[LOOP]]
+
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: b _bar
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: b _baz
+
+ %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ tail call void @bar() #2
+ br label %end
+
+false:
+ tail call void @baz() #2
+ br label %end
+
+end:
+ ret void
+}
+
+declare void @bar()
+declare void @baz()
diff --git a/test/CodeGen/AArch64/compiler-ident.ll b/test/CodeGen/AArch64/compiler-ident.ll
new file mode 100644
index 0000000..0350571
--- /dev/null
+++ b/test/CodeGen/AArch64/compiler-ident.ll
@@ -0,0 +1,12 @@
+; RUN: llc -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
+
+; ModuleID = 'compiler-ident.c'
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+; CHECK: .ident "some LLVM version"
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"some LLVM version"}
+
diff --git a/test/CodeGen/AArch64/complex-fp-to-int.ll b/test/CodeGen/AArch64/complex-fp-to-int.ll
new file mode 100644
index 0000000..13cf762
--- /dev/null
+++ b/test/CodeGen/AArch64/complex-fp-to-int.ll
@@ -0,0 +1,141 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x i64> @test_v2f32_to_signed_v2i64(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_signed_v2i64:
+; CHECK: fcvtl [[VAL64:v[0-9]+]].2d, v0.2s
+; CHECK: fcvtzs.2d v0, [[VAL64]]
+
+ %val = fptosi <2 x float> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <2 x i64> @test_v2f32_to_unsigned_v2i64(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_unsigned_v2i64:
+; CHECK: fcvtl [[VAL64:v[0-9]+]].2d, v0.2s
+; CHECK: fcvtzu.2d v0, [[VAL64]]
+
+ %val = fptoui <2 x float> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <2 x i16> @test_v2f32_to_signed_v2i16(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_signed_v2i16:
+; CHECK: fcvtzs.2s v0, v0
+
+ %val = fptosi <2 x float> %in to <2 x i16>
+ ret <2 x i16> %val
+}
+
+define <2 x i16> @test_v2f32_to_unsigned_v2i16(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_unsigned_v2i16:
+; CHECK: fcvtzs.2s v0, v0
+
+ %val = fptoui <2 x float> %in to <2 x i16>
+ ret <2 x i16> %val
+}
+
+define <2 x i8> @test_v2f32_to_signed_v2i8(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_signed_v2i8:
+; CHECK: fcvtzs.2s v0, v0
+
+ %val = fptosi <2 x float> %in to <2 x i8>
+ ret <2 x i8> %val
+}
+
+define <2 x i8> @test_v2f32_to_unsigned_v2i8(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_unsigned_v2i8:
+; CHECK: fcvtzs.2s v0, v0
+
+ %val = fptoui <2 x float> %in to <2 x i8>
+ ret <2 x i8> %val
+}
+
+define <4 x i16> @test_v4f32_to_signed_v4i16(<4 x float> %in) {
+; CHECK-LABEL: test_v4f32_to_signed_v4i16:
+; CHECK: fcvtzs.4s [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.4h v0, [[VAL64]]
+
+ %val = fptosi <4 x float> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <4 x i16> @test_v4f32_to_unsigned_v4i16(<4 x float> %in) {
+; CHECK-LABEL: test_v4f32_to_unsigned_v4i16:
+; CHECK: fcvtzu.4s [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.4h v0, [[VAL64]]
+
+ %val = fptoui <4 x float> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <4 x i8> @test_v4f32_to_signed_v4i8(<4 x float> %in) {
+; CHECK-LABEL: test_v4f32_to_signed_v4i8:
+; CHECK: fcvtzs.4s [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.4h v0, [[VAL64]]
+
+ %val = fptosi <4 x float> %in to <4 x i8>
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @test_v4f32_to_unsigned_v4i8(<4 x float> %in) {
+; CHECK-LABEL: test_v4f32_to_unsigned_v4i8:
+; CHECK: fcvtzs.4s [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.4h v0, [[VAL64]]
+
+ %val = fptoui <4 x float> %in to <4 x i8>
+ ret <4 x i8> %val
+}
+
+define <2 x i32> @test_v2f64_to_signed_v2i32(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_signed_v2i32:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptosi <2 x double> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x i32> @test_v2f64_to_unsigned_v2i32(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_unsigned_v2i32:
+; CHECK: fcvtzu.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptoui <2 x double> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x i16> @test_v2f64_to_signed_v2i16(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_signed_v2i16:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptosi <2 x double> %in to <2 x i16>
+ ret <2 x i16> %val
+}
+
+define <2 x i16> @test_v2f64_to_unsigned_v2i16(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_unsigned_v2i16:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptoui <2 x double> %in to <2 x i16>
+ ret <2 x i16> %val
+}
+
+define <2 x i8> @test_v2f64_to_signed_v2i8(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_signed_v2i8:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptosi <2 x double> %in to <2 x i8>
+ ret <2 x i8> %val
+}
+
+define <2 x i8> @test_v2f64_to_unsigned_v2i8(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_unsigned_v2i8:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptoui <2 x double> %in to <2 x i8>
+ ret <2 x i8> %val
+}
diff --git a/test/CodeGen/AArch64/complex-int-to-fp.ll b/test/CodeGen/AArch64/complex-int-to-fp.ll
new file mode 100644
index 0000000..5c943f9
--- /dev/null
+++ b/test/CodeGen/AArch64/complex-int-to-fp.ll
@@ -0,0 +1,164 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+; CHECK: autogen_SD19655
+; CHECK: scvtf
+; CHECK: ret
+define void @autogen_SD19655(<2 x i64>* %addr, <2 x float>* %addrfloat) {
+ %T = load <2 x i64>* %addr
+ %F = sitofp <2 x i64> %T to <2 x float>
+ store <2 x float> %F, <2 x float>* %addrfloat
+ ret void
+}
+
+define <2 x double> @test_signed_v2i32_to_v2f64(<2 x i32> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i32_to_v2f64:
+; CHECK: sshll.2d [[VAL64:v[0-9]+]], v0, #0
+; CHECK-NEXT: scvtf.2d v0, [[VAL64]]
+; CHECK-NEXT: ret
+ %conv = sitofp <2 x i32> %v to <2 x double>
+ ret <2 x double> %conv
+}
+
+define <2 x double> @test_unsigned_v2i32_to_v2f64(<2 x i32> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i32_to_v2f64
+; CHECK: ushll.2d [[VAL64:v[0-9]+]], v0, #0
+; CHECK-NEXT: ucvtf.2d v0, [[VAL64]]
+; CHECK-NEXT: ret
+ %conv = uitofp <2 x i32> %v to <2 x double>
+ ret <2 x double> %conv
+}
+
+define <2 x double> @test_signed_v2i16_to_v2f64(<2 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i16_to_v2f64:
+; CHECK: shl.2s [[TMP:v[0-9]+]], v0, #16
+; CHECK: sshr.2s [[VAL32:v[0-9]+]], [[TMP]], #16
+; CHECK: sshll.2d [[VAL64:v[0-9]+]], [[VAL32]], #0
+; CHECK: scvtf.2d v0, [[VAL64]]
+
+ %conv = sitofp <2 x i16> %v to <2 x double>
+ ret <2 x double> %conv
+}
+define <2 x double> @test_unsigned_v2i16_to_v2f64(<2 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i16_to_v2f64
+; CHECK: movi d[[MASK:[0-9]+]], #0x00ffff0000ffff
+; CHECK: and.8b [[VAL32:v[0-9]+]], v0, v[[MASK]]
+; CHECK: ushll.2d [[VAL64:v[0-9]+]], [[VAL32]], #0
+; CHECK: ucvtf.2d v0, [[VAL64]]
+
+ %conv = uitofp <2 x i16> %v to <2 x double>
+ ret <2 x double> %conv
+}
+
+define <2 x double> @test_signed_v2i8_to_v2f64(<2 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i8_to_v2f64:
+; CHECK: shl.2s [[TMP:v[0-9]+]], v0, #24
+; CHECK: sshr.2s [[VAL32:v[0-9]+]], [[TMP]], #24
+; CHECK: sshll.2d [[VAL64:v[0-9]+]], [[VAL32]], #0
+; CHECK: scvtf.2d v0, [[VAL64]]
+
+ %conv = sitofp <2 x i8> %v to <2 x double>
+ ret <2 x double> %conv
+}
+define <2 x double> @test_unsigned_v2i8_to_v2f64(<2 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i8_to_v2f64
+; CHECK: movi d[[MASK:[0-9]+]], #0x0000ff000000ff
+; CHECK: and.8b [[VAL32:v[0-9]+]], v0, v[[MASK]]
+; CHECK: ushll.2d [[VAL64:v[0-9]+]], [[VAL32]], #0
+; CHECK: ucvtf.2d v0, [[VAL64]]
+
+ %conv = uitofp <2 x i8> %v to <2 x double>
+ ret <2 x double> %conv
+}
+
+define <2 x float> @test_signed_v2i64_to_v2f32(<2 x i64> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i64_to_v2f32:
+; CHECK: scvtf.2d [[VAL64:v[0-9]+]], v0
+; CHECK: fcvtn v0.2s, [[VAL64]].2d
+
+ %conv = sitofp <2 x i64> %v to <2 x float>
+ ret <2 x float> %conv
+}
+define <2 x float> @test_unsigned_v2i64_to_v2f32(<2 x i64> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i64_to_v2f32
+; CHECK: ucvtf.2d [[VAL64:v[0-9]+]], v0
+; CHECK: fcvtn v0.2s, [[VAL64]].2d
+
+ %conv = uitofp <2 x i64> %v to <2 x float>
+ ret <2 x float> %conv
+}
+
+define <2 x float> @test_signed_v2i16_to_v2f32(<2 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i16_to_v2f32:
+; CHECK: shl.2s [[TMP:v[0-9]+]], v0, #16
+; CHECK: sshr.2s [[VAL32:v[0-9]+]], [[TMP]], #16
+; CHECK: scvtf.2s v0, [[VAL32]]
+
+ %conv = sitofp <2 x i16> %v to <2 x float>
+ ret <2 x float> %conv
+}
+define <2 x float> @test_unsigned_v2i16_to_v2f32(<2 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i16_to_v2f32
+; CHECK: movi d[[MASK:[0-9]+]], #0x00ffff0000ffff
+; CHECK: and.8b [[VAL32:v[0-9]+]], v0, v[[MASK]]
+; CHECK: ucvtf.2s v0, [[VAL32]]
+
+ %conv = uitofp <2 x i16> %v to <2 x float>
+ ret <2 x float> %conv
+}
+
+define <2 x float> @test_signed_v2i8_to_v2f32(<2 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i8_to_v2f32:
+; CHECK: shl.2s [[TMP:v[0-9]+]], v0, #24
+; CHECK: sshr.2s [[VAL32:v[0-9]+]], [[TMP]], #24
+; CHECK: scvtf.2s v0, [[VAL32]]
+
+ %conv = sitofp <2 x i8> %v to <2 x float>
+ ret <2 x float> %conv
+}
+define <2 x float> @test_unsigned_v2i8_to_v2f32(<2 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i8_to_v2f32
+; CHECK: movi d[[MASK:[0-9]+]], #0x0000ff000000ff
+; CHECK: and.8b [[VAL32:v[0-9]+]], v0, v[[MASK]]
+; CHECK: ucvtf.2s v0, [[VAL32]]
+
+ %conv = uitofp <2 x i8> %v to <2 x float>
+ ret <2 x float> %conv
+}
+
+define <4 x float> @test_signed_v4i16_to_v4f32(<4 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v4i16_to_v4f32:
+; CHECK: sshll.4s [[VAL32:v[0-9]+]], v0, #0
+; CHECK: scvtf.4s v0, [[VAL32]]
+
+ %conv = sitofp <4 x i16> %v to <4 x float>
+ ret <4 x float> %conv
+}
+
+define <4 x float> @test_unsigned_v4i16_to_v4f32(<4 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v4i16_to_v4f32
+; CHECK: ushll.4s [[VAL32:v[0-9]+]], v0, #0
+; CHECK: ucvtf.4s v0, [[VAL32]]
+
+ %conv = uitofp <4 x i16> %v to <4 x float>
+ ret <4 x float> %conv
+}
+
+define <4 x float> @test_signed_v4i8_to_v4f32(<4 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v4i8_to_v4f32:
+; CHECK: shl.4h [[TMP:v[0-9]+]], v0, #8
+; CHECK: sshr.4h [[VAL16:v[0-9]+]], [[TMP]], #8
+; CHECK: sshll.4s [[VAL32:v[0-9]+]], [[VAL16]], #0
+; CHECK: scvtf.4s v0, [[VAL32]]
+
+ %conv = sitofp <4 x i8> %v to <4 x float>
+ ret <4 x float> %conv
+}
+define <4 x float> @test_unsigned_v4i8_to_v4f32(<4 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v4i8_to_v4f32
+; CHECK: bic.4h v0, #0xff, lsl #8
+; CHECK: ushll.4s [[VAL32:v[0-9]+]], v0, #0
+; CHECK: ucvtf.4s v0, [[VAL32]]
+
+ %conv = uitofp <4 x i8> %v to <4 x float>
+ ret <4 x float> %conv
+}
diff --git a/test/CodeGen/AArch64/directcond.ll b/test/CodeGen/AArch64/directcond.ll
index 1b51928..fbea4a6 100644
--- a/test/CodeGen/AArch64/directcond.ll
+++ b/test/CodeGen/AArch64/directcond.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s --check-prefix=CHECK
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-atomic-cfg-tidy=0 | FileCheck %s --check-prefix=CHECK
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -aarch64-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-NOFP %s
define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) {
; CHECK-LABEL: test_select_i32:
diff --git a/test/CodeGen/AArch64/f16-convert.ll b/test/CodeGen/AArch64/f16-convert.ll
new file mode 100644
index 0000000..6fabdc5
--- /dev/null
+++ b/test/CodeGen/AArch64/f16-convert.ll
@@ -0,0 +1,254 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios -asm-verbose=false | FileCheck %s
+
+define float @load0(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load0:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %tmp = load i16* %a, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load1(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load1:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %tmp = load i16* %a, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ %conv = fpext float %tmp1 to double
+ ret double %conv
+}
+
+define float @load2(i16* nocapture readonly %a, i32 %i) nounwind {
+; CHECK-LABEL: load2:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, w1, sxtw #1]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load3(i16* nocapture readonly %a, i32 %i) nounwind {
+; CHECK-LABEL: load3:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, w1, sxtw #1]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ %conv = fpext float %tmp1 to double
+ ret double %conv
+}
+
+define float @load4(i16* nocapture readonly %a, i64 %i) nounwind {
+; CHECK-LABEL: load4:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load5(i16* nocapture readonly %a, i64 %i) nounwind {
+; CHECK-LABEL: load5:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ %conv = fpext float %tmp1 to double
+ ret double %conv
+}
+
+define float @load6(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load6:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, #20]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 10
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load7(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load7:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, #20]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 10
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ %conv = fpext float %tmp1 to double
+ ret double %conv
+}
+
+define float @load8(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load8:
+; CHECK-NEXT: ldur [[HREG:h[0-9]+]], [x0, #-20]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load9(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load9:
+; CHECK-NEXT: ldur [[HREG:h[0-9]+]], [x0, #-20]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16(i16 %tmp)
+ %conv = fpext float %tmp1 to double
+ ret double %conv
+}
+
+define void @store0(i16* nocapture %a, float %val) nounwind {
+; CHECK-LABEL: store0:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: str h0, [x0]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %val)
+ store i16 %tmp, i16* %a, align 2
+ ret void
+}
+
+define void @store1(i16* nocapture %a, double %val) nounwind {
+; CHECK-LABEL: store1:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: str h0, [x0]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %conv)
+ store i16 %tmp, i16* %a, align 2
+ ret void
+}
+
+define void @store2(i16* nocapture %a, i32 %i, float %val) nounwind {
+; CHECK-LABEL: store2:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: str h0, [x0, w1, sxtw #1]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %val)
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store3(i16* nocapture %a, i32 %i, double %val) nounwind {
+; CHECK-LABEL: store3:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: str h0, [x0, w1, sxtw #1]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %conv)
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store4(i16* nocapture %a, i64 %i, float %val) nounwind {
+; CHECK-LABEL: store4:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: str h0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %val)
+ %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store5(i16* nocapture %a, i64 %i, double %val) nounwind {
+; CHECK-LABEL: store5:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: str h0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %conv)
+ %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store6(i16* nocapture %a, float %val) nounwind {
+; CHECK-LABEL: store6:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: str h0, [x0, #20]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %val)
+ %arrayidx = getelementptr inbounds i16* %a, i64 10
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store7(i16* nocapture %a, double %val) nounwind {
+; CHECK-LABEL: store7:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: str h0, [x0, #20]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %conv)
+ %arrayidx = getelementptr inbounds i16* %a, i64 10
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store8(i16* nocapture %a, float %val) nounwind {
+; CHECK-LABEL: store8:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: stur h0, [x0, #-20]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %val)
+ %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store9(i16* nocapture %a, double %val) nounwind {
+; CHECK-LABEL: store9:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: stur h0, [x0, #-20]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16(float %conv)
+ %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+declare i16 @llvm.convert.to.fp16(float) nounwind readnone
+declare float @llvm.convert.from.fp16(i16) nounwind readnone
diff --git a/test/CodeGen/AArch64/fast-isel-mul.ll b/test/CodeGen/AArch64/fast-isel-mul.ll
new file mode 100644
index 0000000..d02c67f
--- /dev/null
+++ b/test/CodeGen/AArch64/fast-isel-mul.ll
@@ -0,0 +1,40 @@
+; RUN: llc -fast-isel -fast-isel-abort -mtriple=aarch64 -o - %s | FileCheck %s
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_mul8(i8 %lhs, i8 %rhs) {
+; CHECK-LABEL: test_mul8:
+; CHECK: mul w0, w0, w1
+ %prod = mul i8 %lhs, %rhs
+ store i8 %prod, i8* @var8
+ ret void
+}
+
+define void @test_mul16(i16 %lhs, i16 %rhs) {
+; CHECK-LABEL: test_mul16:
+; CHECK: mul w0, w0, w1
+ %prod = mul i16 %lhs, %rhs
+ store i16 %prod, i16* @var16
+ ret void
+}
+
+define void @test_mul32(i32 %lhs, i32 %rhs) {
+; CHECK-LABEL: test_mul32:
+; CHECK: mul w0, w0, w1
+ %prod = mul i32 %lhs, %rhs
+ store i32 %prod, i32* @var32
+ ret void
+}
+
+define void @test_mul64(i64 %lhs, i64 %rhs) {
+; CHECK-LABEL: test_mul64:
+; CHECK: mul x0, x0, x1
+ %prod = mul i64 %lhs, %rhs
+ store i64 %prod, i64* @var64
+ ret void
+}
diff --git a/test/CodeGen/AArch64/flags-multiuse.ll b/test/CodeGen/AArch64/flags-multiuse.ll
index c9b0b9f..77bbcdd 100644
--- a/test/CodeGen/AArch64/flags-multiuse.ll
+++ b/test/CodeGen/AArch64/flags-multiuse.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s
; LLVM should be able to cope with multiple uses of the same flag-setting
; instruction at different points of a routine. Either by rematerializing the
diff --git a/test/CodeGen/AArch64/funcptr_cast.ll b/test/CodeGen/AArch64/funcptr_cast.ll
new file mode 100644
index 0000000..a00b7bc
--- /dev/null
+++ b/test/CodeGen/AArch64/funcptr_cast.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+
+define i8 @test() {
+; CHECK-LABEL: @test
+; CHECK: adrp {{x[0-9]+}}, foo
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, :lo12:foo
+; CHECK: ldrb w0, [{{x[0-9]+}}]
+entry:
+ %0 = load i8* bitcast (void (...)* @foo to i8*), align 1
+ ret i8 %0
+}
+
+declare void @foo(...)
diff --git a/test/CodeGen/AArch64/global-merge-1.ll b/test/CodeGen/AArch64/global-merge-1.ll
new file mode 100644
index 0000000..68aba5e
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-1.ll
@@ -0,0 +1,26 @@
+; RUN: llc %s -mtriple=aarch64-none-linux-gnu -enable-global-merge -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-none-linux-gnu -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+
+; RUN: llc %s -mtriple=aarch64-apple-ios -enable-global-merge -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS
+; RUN: llc %s -mtriple=aarch64-apple-ios -enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS
+
+@m = internal global i32 0, align 4
+@n = internal global i32 0, align 4
+
+define void @f1(i32 %a1, i32 %a2) {
+;CHECK-APPLE-IOS: adrp x8, __MergedGlobals@PAGE
+;CHECK-APPLE-IOS-NOT: adrp
+;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals@PAGEOFF
+ store i32 %a1, i32* @m, align 4
+ store i32 %a2, i32* @n, align 4
+ ret void
+}
+
+;CHECK: .type _MergedGlobals,@object // @_MergedGlobals
+;CHECK: .local _MergedGlobals
+;CHECK: .comm _MergedGlobals,8,8
+
+;CHECK-APPLE-IOS: .zerofill __DATA,__bss,__MergedGlobals,8,3 ; @_MergedGlobals
diff --git a/test/CodeGen/AArch64/global-merge-2.ll b/test/CodeGen/AArch64/global-merge-2.ll
new file mode 100644
index 0000000..a773566
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-2.ll
@@ -0,0 +1,51 @@
+; RUN: llc %s -mtriple=aarch64-none-linux-gnu -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-apple-ios -enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS
+
+@x = global i32 0, align 4
+@y = global i32 0, align 4
+@z = global i32 0, align 4
+
+define void @f1(i32 %a1, i32 %a2) {
+;CHECK-APPLE-IOS-LABEL: _f1:
+;CHECK-APPLE-IOS: adrp x8, __MergedGlobals_x@PAGE
+;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals_x@PAGEOFF
+;CHECK-APPLE-IOS-NOT: adrp
+ store i32 %a1, i32* @x, align 4
+ store i32 %a2, i32* @y, align 4
+ ret void
+}
+
+define void @g1(i32 %a1, i32 %a2) {
+;CHECK-APPLE-IOS-LABEL: _g1:
+;CHECK-APPLE-IOS: adrp x8, __MergedGlobals_x@PAGE
+;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals_x@PAGEOFF
+;CHECK-APPLE-IOS-NOT: adrp
+ store i32 %a1, i32* @y, align 4
+ store i32 %a2, i32* @z, align 4
+ ret void
+}
+
+;CHECK: .type _MergedGlobals_x,@object // @_MergedGlobals_x
+;CHECK: .globl _MergedGlobals_x
+;CHECK: .align 3
+;CHECK: _MergedGlobals_x:
+;CHECK: .size _MergedGlobals_x, 12
+
+;CHECK: .globl x
+;CHECK: x = _MergedGlobals_x
+;CHECK: .globl y
+;CHECK: y = _MergedGlobals_x+4
+;CHECK: .globl z
+;CHECK: z = _MergedGlobals_x+8
+
+;CHECK-APPLE-IOS: .globl __MergedGlobals_x ; @_MergedGlobals_x
+;CHECK-APPLE-IOS: .zerofill __DATA,__common,__MergedGlobals_x,12,3
+
+;CHECK-APPLE-IOS: .globl _x
+;CHECK-APPLE-IOS: _x = __MergedGlobals_x
+;CHECK-APPLE-IOS: .globl _y
+;CHECK-APPLE-IOS: _y = __MergedGlobals_x+4
+;CHECK-APPLE-IOS: .globl _z
+;CHECK-APPLE-IOS: _z = __MergedGlobals_x+8
+;CHECK-APPLE-IOS: .subsections_via_symbols
diff --git a/test/CodeGen/AArch64/global-merge-3.ll b/test/CodeGen/AArch64/global-merge-3.ll
new file mode 100644
index 0000000..d455d40
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-3.ll
@@ -0,0 +1,51 @@
+; RUN: llc %s -mtriple=aarch64-none-linux-gnu -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-apple-ios -enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS
+
+@x = global [1000 x i32] zeroinitializer, align 1
+@y = global [1000 x i32] zeroinitializer, align 1
+@z = internal global i32 1, align 4
+
+define void @f1(i32 %a1, i32 %a2, i32 %a3) {
+;CHECK-APPLE-IOS: adrp x8, __MergedGlobals_x@PAGE
+;CHECK-APPLE-IOS-NOT: adrp
+;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals_x@PAGEOFF
+;CHECK-APPLE-IOS: adrp x9, __MergedGlobals_y@PAGE
+;CHECK-APPLE-IOS: add x9, x9, __MergedGlobals_y@PAGEOFF
+ %x3 = getelementptr inbounds [1000 x i32]* @x, i32 0, i64 3
+ %y3 = getelementptr inbounds [1000 x i32]* @y, i32 0, i64 3
+ store i32 %a1, i32* %x3, align 4
+ store i32 %a2, i32* %y3, align 4
+ store i32 %a3, i32* @z, align 4
+ ret void
+}
+
+;CHECK: .type _MergedGlobals_x,@object // @_MergedGlobals_x
+;CHECK: .globl _MergedGlobals_x
+;CHECK: .align 4
+;CHECK: _MergedGlobals_x:
+;CHECK: .size _MergedGlobals_x, 4004
+
+;CHECK: .type _MergedGlobals_y,@object // @_MergedGlobals_y
+;CHECK: .globl _MergedGlobals_y
+;CHECK: _MergedGlobals_y:
+;CHECK: .size _MergedGlobals_y, 4000
+
+;CHECK-APPLE-IOS: .globl __MergedGlobals_x ; @_MergedGlobals_x
+;CHECK-APPLE-IOS: .align 4
+;CHECK-APPLE-IOS: __MergedGlobals_x:
+;CHECK-APPLE-IOS: .long 1
+;CHECK-APPLE-IOS: .space 4000
+
+;CHECK-APPLE-IOS: .globl __MergedGlobals_y ; @_MergedGlobals_y
+;CHECK-APPLE-IOS: .zerofill __DATA,__common,__MergedGlobals_y,4000,4
+
+;CHECK: .globl x
+;CHECK: x = _MergedGlobals_x+4
+;CHECK: .globl y
+;CHECK: y = _MergedGlobals_y
+
+;CHECK-APPLE-IOS:.globl _x
+;CHECK-APPLE-IOS: _x = __MergedGlobals_x+4
+;CHECK-APPLE-IOS:.globl _y
+;CHECK-APPLE-IOS: _y = __MergedGlobals_y
diff --git a/test/CodeGen/AArch64/global-merge-4.ll b/test/CodeGen/AArch64/global-merge-4.ll
new file mode 100644
index 0000000..a525ccd
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-4.ll
@@ -0,0 +1,73 @@
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -o - | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+@bar = internal global [5 x i32] zeroinitializer, align 4
+@baz = internal global [5 x i32] zeroinitializer, align 4
+@foo = internal global [5 x i32] zeroinitializer, align 4
+
+; Function Attrs: nounwind ssp
+define internal void @initialize() #0 {
+ %1 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %1, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 0), align 4
+ %2 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %2, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 0), align 4
+ %3 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %3, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 1), align 4
+ %4 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %4, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 1), align 4
+ %5 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %5, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 2), align 4
+ %6 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %6, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 2), align 4
+ %7 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %7, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 3), align 4
+ %8 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %8, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 3), align 4
+ %9 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %9, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 4), align 4
+ %10 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %10, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 4), align 4
+ ret void
+}
+
+declare i32 @calc(...)
+
+; Function Attrs: nounwind ssp
+define internal void @calculate() #0 {
+ %1 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 0), align 4
+ %2 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 0), align 4
+ %3 = mul nsw i32 %2, %1
+ store i32 %3, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 0), align 4
+ %4 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 1), align 4
+ %5 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 1), align 4
+ %6 = mul nsw i32 %5, %4
+ store i32 %6, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 1), align 4
+ %7 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 2), align 4
+ %8 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 2), align 4
+ %9 = mul nsw i32 %8, %7
+ store i32 %9, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 2), align 4
+ %10 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 3), align 4
+ %11 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 3), align 4
+ %12 = mul nsw i32 %11, %10
+ store i32 %12, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 3), align 4
+ %13 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 4), align 4
+ %14 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 4), align 4
+ %15 = mul nsw i32 %14, %13
+ store i32 %15, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 4), align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone ssp
+define internal i32* @returnFoo() #1 {
+ ret i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 0)
+}
+
+;CHECK: .type _MergedGlobals,@object // @_MergedGlobals
+;CHECK: .local _MergedGlobals
+;CHECK: .comm _MergedGlobals,60,16
+
+attributes #0 = { nounwind ssp }
+attributes #1 = { nounwind readnone ssp }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/AArch64/global-merge.ll b/test/CodeGen/AArch64/global-merge.ll
new file mode 100644
index 0000000..aed1dc4
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -O0 | FileCheck --check-prefix=NO-MERGE %s
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -O0 -global-merge-on-external=true | FileCheck --check-prefix=NO-MERGE %s
+
+; RUN: llc < %s -mtriple=aarch64-apple-ios -O0 | FileCheck %s --check-prefix=CHECK-APPLE-IOS-NO-MERGE
+; RUN: llc < %s -mtriple=aarch64-apple-ios -O0 -global-merge-on-external=true | FileCheck %s --check-prefix=CHECK-APPLE-IOS-NO-MERGE
+
+; FIXME: add O1/O2 test for aarch64-none-linux-gnu and aarch64-apple-ios
+
+@m = internal global i32 0, align 4
+@n = internal global i32 0, align 4
+
+define void @f1(i32 %a1, i32 %a2) {
+; CHECK-LABEL: f1:
+; CHECK: adrp x{{[0-9]+}}, _MergedGlobals
+; CHECK-NOT: adrp
+
+; CHECK-APPLE-IOS-LABEL: f1:
+; CHECK-APPLE-IOS: adrp x{{[0-9]+}}, __MergedGlobals
+; CHECK-APPLE-IOS-NOT: adrp
+ store i32 %a1, i32* @m, align 4
+ store i32 %a2, i32* @n, align 4
+ ret void
+}
+
+; CHECK: .local _MergedGlobals
+; CHECK: .comm _MergedGlobals,8,8
+; NO-MERGE-NOT: .local _MergedGlobals
+
+; CHECK-APPLE-IOS: .zerofill __DATA,__bss,__MergedGlobals,8,3
+; CHECK-APPLE-IOS-NO-MERGE-NOT: .zerofill __DATA,__bss,__MergedGlobals,8,3
diff --git a/test/CodeGen/AArch64/i128-fast-isel-fallback.ll b/test/CodeGen/AArch64/i128-fast-isel-fallback.ll
new file mode 100644
index 0000000..1cffbf3
--- /dev/null
+++ b/test/CodeGen/AArch64/i128-fast-isel-fallback.ll
@@ -0,0 +1,18 @@
+; RUN: llc -O0 -mtriple=arm64-apple-ios7.0 -mcpu=generic < %s | FileCheck %s
+
+; Function Attrs: nounwind ssp
+define void @test1() {
+ %1 = sext i32 0 to i128
+ call void @test2(i128 %1)
+ ret void
+
+; The i128 is 0, so we can test that it is propagated into the x
+; registers that make up the i128 pair.
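+; (Under the AArch64 calling convention an i128 argument travels in a pair
+; of x registers; both halves are zero here, which is why xzr feeds the
+; pair in the checks below.)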
+
+; CHECK: mov x0, xzr
+; CHECK: mov x1, x0
+; CHECK: bl _test2
+
+}
+
+declare void @test2(i128)
diff --git a/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll b/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll
new file mode 100644
index 0000000..645214a
--- /dev/null
+++ b/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll
@@ -0,0 +1,26 @@
+; We actually need to use -filetype=obj in this test because if we output
+; assembly, the current code path will bypass the parser and just write the
+; raw text out to the Streamer. We need to actually parse the inlineasm to
+; demonstrate the bug. Going the asm->obj route does not show the issue.
+; RUN: llc -mtriple=aarch64 < %s -filetype=obj | llvm-objdump -arch=aarch64 -d - | FileCheck %s
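+;
+; Roughly, the "ldr $0,=imm" pseudo is expected to expand to a single movz
+; when the constant fits one move (the 0xabcd case below) and otherwise to
+; a load from a literal pool emitted after the function (the 0x10001 case).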
+
+; CHECK-LABEL: foo:
+; CHECK: a0 79 95 d2 movz x0, #0xabcd
+; CHECK: c0 03 5f d6 ret
+define i32 @foo() nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "ldr $0,=0xabcd", "=r"() nounwind
+ ret i32 %0
+}
+; CHECK-LABEL: bar:
+; CHECK: 40 00 00 58 ldr x0, #8
+; CHECK: c0 03 5f d6 ret
+; Make sure the constant pool entry comes after the return
+; CHECK-LABEL: $d.1:
+define i32 @bar() nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "ldr $0,=0x10001", "=r"() nounwind
+ ret i32 %0
+}
+
+
diff --git a/test/CodeGen/AArch64/jump-table.ll b/test/CodeGen/AArch64/jump-table.ll
index 1dfb789..69fbd99 100644
--- a/test/CodeGen/AArch64/jump-table.ll
+++ b/test/CodeGen/AArch64/jump-table.ll
@@ -1,6 +1,6 @@
-; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefix=CHECK-LARGE %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -o - %s | FileCheck --check-prefix=CHECK-PIC %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -aarch64-atomic-cfg-tidy=0 -o - %s | FileCheck --check-prefix=CHECK-PIC %s
define i32 @test_jumptable(i32 %in) {
; CHECK: test_jumptable
diff --git a/test/CodeGen/AArch64/ldst-opt.ll b/test/CodeGen/AArch64/ldst-opt.ll
index 1ce5c95..e4f4295 100644
--- a/test/CodeGen/AArch64/ldst-opt.ll
+++ b/test/CodeGen/AArch64/ldst-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s
; This file contains tests for the AArch64 load/store optimizer.
@@ -166,6 +166,217 @@ bar:
; Check the following transform:
;
+; add x8, x8, #16
+; ...
+; ldr X, [x8]
+; ->
+; ldr X, [x8, #16]!
+;
+; with X being either w0, x0, s0, d0 or q0.
+
+%pre.struct.i32 = type { i32, i32, i32}
+%pre.struct.i64 = type { i32, i64, i64}
+%pre.struct.i128 = type { i32, <2 x i64>, <2 x i64>}
+%pre.struct.float = type { i32, float, float}
+%pre.struct.double = type { i32, double, double}
+
+define i32 @load-pre-indexed-word2(%pre.struct.i32** %this, i1 %cond,
+ %pre.struct.i32* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-word2
+; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #4]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i32** %this
+ %gep1 = getelementptr inbounds %pre.struct.i32* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i32* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load i32* %retptr
+ ret i32 %ret
+}
+
+define i64 @load-pre-indexed-doubleword2(%pre.struct.i64** %this, i1 %cond,
+ %pre.struct.i64* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-doubleword2
+; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #8]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i64** %this
+ %gep1 = getelementptr inbounds %pre.struct.i64* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i64* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load i64* %retptr
+ ret i64 %ret
+}
+
+define <2 x i64> @load-pre-indexed-quadword2(%pre.struct.i128** %this, i1 %cond,
+ %pre.struct.i128* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-quadword2
+; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #16]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i128** %this
+ %gep1 = getelementptr inbounds %pre.struct.i128* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i128* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load <2 x i64>* %retptr
+ ret <2 x i64> %ret
+}
+
+define float @load-pre-indexed-float2(%pre.struct.float** %this, i1 %cond,
+ %pre.struct.float* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-float2
+; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #4]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.float** %this
+ %gep1 = getelementptr inbounds %pre.struct.float* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.float* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load float* %retptr
+ ret float %ret
+}
+
+define double @load-pre-indexed-double2(%pre.struct.double** %this, i1 %cond,
+ %pre.struct.double* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-double2
+; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #8]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.double** %this
+ %gep1 = getelementptr inbounds %pre.struct.double* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.double* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load double* %retptr
+ ret double %ret
+}
+
+; Check the following transform:
+;
+; add x8, x8, #16
+; ...
+; str X, [x8]
+; ->
+; str X, [x8, #16]!
+;
+; with X being either w0, x0, s0, d0 or q0.
+
+define void @store-pre-indexed-word2(%pre.struct.i32** %this, i1 %cond,
+ %pre.struct.i32* %load2,
+ i32 %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-word2
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}, #4]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i32** %this
+ %gep1 = getelementptr inbounds %pre.struct.i32* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i32* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store i32 %val, i32* %retptr
+ ret void
+}
+
+define void @store-pre-indexed-doubleword2(%pre.struct.i64** %this, i1 %cond,
+ %pre.struct.i64* %load2,
+ i64 %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-doubleword2
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}, #8]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i64** %this
+ %gep1 = getelementptr inbounds %pre.struct.i64* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i64* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store i64 %val, i64* %retptr
+ ret void
+}
+
+define void @store-pre-indexed-quadword2(%pre.struct.i128** %this, i1 %cond,
+ %pre.struct.i128* %load2,
+ <2 x i64> %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-quadword2
+; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}, #16]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i128** %this
+ %gep1 = getelementptr inbounds %pre.struct.i128* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i128* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store <2 x i64> %val, <2 x i64>* %retptr
+ ret void
+}
+
+define void @store-pre-indexed-float2(%pre.struct.float** %this, i1 %cond,
+ %pre.struct.float* %load2,
+ float %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-float2
+; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}, #4]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.float** %this
+ %gep1 = getelementptr inbounds %pre.struct.float* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.float* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store float %val, float* %retptr
+ ret void
+}
+
+define void @store-pre-indexed-double2(%pre.struct.double** %this, i1 %cond,
+ %pre.struct.double* %load2,
+ double %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-double2
+; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}, #8]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.double** %this
+ %gep1 = getelementptr inbounds %pre.struct.double* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.double* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store double %val, double* %retptr
+ ret void
+}
+
+; Check the following transform:
+;
; ldr X, [x20]
; ...
; add x20, x20, #32
@@ -294,8 +505,263 @@ exit:
ret void
}
+; Check the following transform:
+;
+; str X, [x20]
+; ...
+; add x20, x20, #32
+; ->
+; str X, [x20], #32
+;
+; with X being either w0, x0, s0, d0 or q0.
+
+define void @store-post-indexed-word(i32* %array, i64 %count, i32 %val) nounwind {
+; CHECK-LABEL: store-post-indexed-word
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #16
+entry:
+ %gep1 = getelementptr i32* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr i32* %iv2, i64 -1
+ %load = load i32* %gep2
+ call void @use-word(i32 %load)
+ store i32 %val, i32* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr i32* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @store-post-indexed-doubleword(i64* %array, i64 %count, i64 %val) nounwind {
+; CHECK-LABEL: store-post-indexed-doubleword
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #32
+entry:
+ %gep1 = getelementptr i64* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr i64* %iv2, i64 -1
+ %load = load i64* %gep2
+ call void @use-doubleword(i64 %load)
+ store i64 %val, i64* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr i64* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @store-post-indexed-quadword(<2 x i64>* %array, i64 %count, <2 x i64> %val) nounwind {
+; CHECK-LABEL: store-post-indexed-quadword
+; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}], #64
+entry:
+ %gep1 = getelementptr <2 x i64>* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr <2 x i64>* %iv2, i64 -1
+ %load = load <2 x i64>* %gep2
+ call void @use-quadword(<2 x i64> %load)
+ store <2 x i64> %val, <2 x i64>* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr <2 x i64>* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @store-post-indexed-float(float* %array, i64 %count, float %val) nounwind {
+; CHECK-LABEL: store-post-indexed-float
+; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}], #16
+entry:
+ %gep1 = getelementptr float* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr float* %iv2, i64 -1
+ %load = load float* %gep2
+ call void @use-float(float %load)
+ store float %val, float* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr float* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @store-post-indexed-double(double* %array, i64 %count, double %val) nounwind {
+; CHECK-LABEL: store-post-indexed-double
+; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}], #32
+entry:
+ %gep1 = getelementptr double* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr double* %iv2, i64 -1
+ %load = load double* %gep2
+ call void @use-double(double %load)
+ store double %val, double* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr double* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
declare void @use-word(i32)
declare void @use-doubleword(i64)
declare void @use-quadword(<2 x i64>)
declare void @use-float(float)
declare void @use-double(double)
+
+; Check the following transform:
+;
+; (ldr|str) X, [x20]
+; ...
+; sub x20, x20, #16
+; ->
+; (ldr|str) X, [x20], #-16
+;
+; with X being either w0, x0, s0, d0 or q0.
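+;
+; A minimal before/after sketch (hypothetical registers, not a CHECKed test):
+;
+;   ldr w1, [x20]
+;   sub x20, x20, #16
+; =>
+;   ldr w1, [x20], #-16  ; post-indexed access with negative write-back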
+
+define void @post-indexed-sub-word(i32* %a, i32* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-word
+; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}], #-8
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #-8
+ br label %for.body
+for.body:
+ %phi1 = phi i32* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi i32* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body ], [ %count, %0 ]
+ %gep1 = getelementptr i32* %phi1, i64 -1
+ %load1 = load i32* %gep1
+ %gep2 = getelementptr i32* %phi2, i64 -1
+ store i32 %load1, i32* %gep2
+ %load2 = load i32* %phi1
+ store i32 %load2, i32* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr i32* %phi2, i64 -2
+ %gep4 = getelementptr i32* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
+
+define void @post-indexed-sub-doubleword(i64* %a, i64* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-doubleword
+; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}], #-16
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #-16
+ br label %for.body
+for.body:
+ %phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body ], [ %count, %0 ]
+ %gep1 = getelementptr i64* %phi1, i64 -1
+ %load1 = load i64* %gep1
+ %gep2 = getelementptr i64* %phi2, i64 -1
+ store i64 %load1, i64* %gep2
+ %load2 = load i64* %phi1
+ store i64 %load2, i64* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr i64* %phi2, i64 -2
+ %gep4 = getelementptr i64* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
+
+define void @post-indexed-sub-quadword(<2 x i64>* %a, <2 x i64>* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-quadword
+; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}], #-32
+; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}], #-32
+ br label %for.body
+for.body:
+ %phi1 = phi <2 x i64>* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi <2 x i64>* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body ], [ %count, %0 ]
+ %gep1 = getelementptr <2 x i64>* %phi1, i64 -1
+ %load1 = load <2 x i64>* %gep1
+ %gep2 = getelementptr <2 x i64>* %phi2, i64 -1
+ store <2 x i64> %load1, <2 x i64>* %gep2
+ %load2 = load <2 x i64>* %phi1
+ store <2 x i64> %load2, <2 x i64>* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr <2 x i64>* %phi2, i64 -2
+ %gep4 = getelementptr <2 x i64>* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
+
+define void @post-indexed-sub-float(float* %a, float* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-float
+; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}], #-8
+; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}], #-8
+ br label %for.body
+for.body:
+ %phi1 = phi float* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi float* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body ], [ %count, %0 ]
+ %gep1 = getelementptr float* %phi1, i64 -1
+ %load1 = load float* %gep1
+ %gep2 = getelementptr float* %phi2, i64 -1
+ store float %load1, float* %gep2
+ %load2 = load float* %phi1
+ store float %load2, float* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr float* %phi2, i64 -2
+ %gep4 = getelementptr float* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
+
+define void @post-indexed-sub-double(double* %a, double* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-double
+; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}], #-16
+; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}], #-16
+ br label %for.body
+for.body:
+ %phi1 = phi double* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi double* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body ], [ %count, %0 ]
+ %gep1 = getelementptr double* %phi1, i64 -1
+ %load1 = load double* %gep1
+ %gep2 = getelementptr double* %phi2, i64 -1
+ store double %load1, double* %gep2
+ %load2 = load double* %phi1
+ store double %load2, double* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr double* %phi2, i64 -2
+ %gep4 = getelementptr double* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/lit.local.cfg b/test/CodeGen/AArch64/lit.local.cfg
index 77493d8..125995c 100644
--- a/test/CodeGen/AArch64/lit.local.cfg
+++ b/test/CodeGen/AArch64/lit.local.cfg
@@ -2,8 +2,7 @@ import re
config.suffixes = ['.ll']
-targets = set(config.root.targets_to_build.split())
-if not 'AArch64' in targets:
+if not 'AArch64' in config.root.targets:
config.unsupported = True
# For now we don't test arm64-win32.
diff --git a/test/CodeGen/AArch64/memcpy-f128.ll b/test/CodeGen/AArch64/memcpy-f128.ll
new file mode 100644
index 0000000..76db297
--- /dev/null
+++ b/test/CodeGen/AArch64/memcpy-f128.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
+
+%structA = type { i128 }
+@stubA = internal unnamed_addr constant %structA zeroinitializer, align 8
+
+; Make sure we don't hit llvm_unreachable.
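+; (The 48-byte copy from the i128-based constant should lower to 128-bit
+; q-register loads and stores, as the CHECK lines below expect.)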
+
+define void @test1() {
+; CHECK-LABEL: @test1
+; CHECK: adrp
+; CHECK: ldr q0
+; CHECK: str q0
+; CHECK: ret
+entry:
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* bitcast (%structA* @stubA to i8*), i64 48, i32 8, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
diff --git a/test/CodeGen/AArch64/mul_pow2.ll b/test/CodeGen/AArch64/mul_pow2.ll
new file mode 100644
index 0000000..efc0ec8
--- /dev/null
+++ b/test/CodeGen/AArch64/mul_pow2.ll
@@ -0,0 +1,123 @@
+; RUN: llc < %s -march=aarch64 | FileCheck %s
+
+; Convert mul x, pow2 to shift.
+; Convert mul x, pow2 +/- 1 to shift + add/sub.
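+;
+; For example (an arithmetic sketch, not a CHECKed pattern):
+;   x*3 = (x << 1) + x, x*5 = (x << 2) + x, x*7 = (x << 3) - x,
+; so each multiply lowers to a single shift or one shifted-operand add/sub.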
+
+define i32 @test2(i32 %x) {
+; CHECK-LABEL: test2
+; CHECK: lsl w0, w0, #1
+
+ %mul = shl nsw i32 %x, 1
+ ret i32 %mul
+}
+
+define i32 @test3(i32 %x) {
+; CHECK-LABEL: test3
+; CHECK: add w0, w0, w0, lsl #1
+
+ %mul = mul nsw i32 %x, 3
+ ret i32 %mul
+}
+
+define i32 @test4(i32 %x) {
+; CHECK-LABEL: test4
+; CHECK: lsl w0, w0, #2
+
+ %mul = shl nsw i32 %x, 2
+ ret i32 %mul
+}
+
+define i32 @test5(i32 %x) {
+; CHECK-LABEL: test5
+; CHECK: add w0, w0, w0, lsl #2
+
+ %mul = mul nsw i32 %x, 5
+ ret i32 %mul
+}
+
+define i32 @test7(i32 %x) {
+; CHECK-LABEL: test7
+; CHECK: lsl {{w[0-9]+}}, w0, #3
+; CHECK: sub w0, {{w[0-9]+}}, w0
+
+ %mul = mul nsw i32 %x, 7
+ ret i32 %mul
+}
+
+define i32 @test8(i32 %x) {
+; CHECK-LABEL: test8
+; CHECK: lsl w0, w0, #3
+
+ %mul = shl nsw i32 %x, 3
+ ret i32 %mul
+}
+
+define i32 @test9(i32 %x) {
+; CHECK-LABEL: test9
+; CHECK: add w0, w0, w0, lsl #3
+
+ %mul = mul nsw i32 %x, 9
+ ret i32 %mul
+}
+
+; Convert mul x, -pow2 to shift.
+; Convert mul x, -(pow2 +/- 1) to shift + add/sub.
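+;
+; For example (arithmetic sketch): x*-2 = -(x << 1), a single shifted neg, and
+; x*-3 = -((x << 1) + x), a shifted add followed by a neg.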
+
+define i32 @ntest2(i32 %x) {
+; CHECK-LABEL: ntest2
+; CHECK: neg w0, w0, lsl #1
+
+ %mul = mul nsw i32 %x, -2
+ ret i32 %mul
+}
+
+define i32 @ntest3(i32 %x) {
+; CHECK-LABEL: ntest3
+; CHECK: add {{w[0-9]+}}, w0, w0, lsl #1
+; CHECK: neg w0, {{w[0-9]+}}
+
+ %mul = mul nsw i32 %x, -3
+ ret i32 %mul
+}
+
+define i32 @ntest4(i32 %x) {
+; CHECK-LABEL: ntest4
+; CHECK: neg w0, w0, lsl #2
+
+ %mul = mul nsw i32 %x, -4
+ ret i32 %mul
+}
+
+define i32 @ntest5(i32 %x) {
+; CHECK-LABEL: ntest5
+; CHECK: add {{w[0-9]+}}, w0, w0, lsl #2
+; CHECK: neg w0, {{w[0-9]+}}
+ %mul = mul nsw i32 %x, -5
+ ret i32 %mul
+}
+
+define i32 @ntest7(i32 %x) {
+; CHECK-LABEL: ntest7
+; CHECK: sub w0, w0, w0, lsl #3
+
+ %mul = mul nsw i32 %x, -7
+ ret i32 %mul
+}
+
+define i32 @ntest8(i32 %x) {
+; CHECK-LABEL: ntest8
+; CHECK: neg w0, w0, lsl #3
+
+ %mul = mul nsw i32 %x, -8
+ ret i32 %mul
+}
+
+define i32 @ntest9(i32 %x) {
+; CHECK-LABEL: ntest9
+; CHECK: add {{w[0-9]+}}, w0, w0, lsl #3
+; CHECK: neg w0, {{w[0-9]+}}
+
+ %mul = mul nsw i32 %x, -9
+ ret i32 %mul
+}
diff --git a/test/CodeGen/AArch64/regress-tail-livereg.ll b/test/CodeGen/AArch64/regress-tail-livereg.ll
index e32ac84..03c3f33 100644
--- a/test/CodeGen/AArch64/regress-tail-livereg.ll
+++ b/test/CodeGen/AArch64/regress-tail-livereg.ll
@@ -17,3 +17,17 @@ define void @foo() {
; CHECK: br {{x([0-79]|1[0-8])}}
ret void
}
+
+; No matter how tempting it is, LLVM should not use x30 since that'll be
+; restored to its incoming value before the "br".
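+; A sketch of the hazard (hypothetical epilogue, assuming x29/x30 were saved):
+;   ldp x29, x30, [sp], #16   ; epilogue reloads the incoming x30 ...
+;   br  x30                   ; ... so a target computed into x30 would be lost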
+define void @test_x30_tail() {
+; CHECK-LABEL: test_x30_tail:
+; CHECK: mov [[DEST:x[0-9]+]], x30
+; CHECK: br [[DEST]]
+ %addr = call i8* @llvm.returnaddress(i32 0)
+ %faddr = bitcast i8* %addr to void()*
+ tail call void %faddr()
+ ret void
+}
+
+declare i8* @llvm.returnaddress(i32)
diff --git a/test/CodeGen/AArch64/trunc-v1i64.ll b/test/CodeGen/AArch64/trunc-v1i64.ll
new file mode 100644
index 0000000..159b8e0
--- /dev/null
+++ b/test/CodeGen/AArch64/trunc-v1i64.ll
@@ -0,0 +1,63 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon -verify-machineinstrs < %s | FileCheck %s
+
+; An optimization in DAG Combiner to fold
+; (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...)
+; will generate nodes like:
+; v1i32 trunc v1i64, v1i16 trunc v1i64, v1i8 trunc v1i64.
+; Such nodes are scalarized by default during type legalization, but that
+; scalarization causes an assertion failure, as v1i64 is a legal type in
+; AArch64. We change the default behaviour from scalarizing to widening.
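+;
+; For instance (a sketch of the combined DAG, not verified output), truncating
+; concat_vectors(v1i64 %x, v1i64 undef) to v2i32 becomes
+; concat_vectors(trunc %x to v1i32, undef), and that v1i32 trunc is the node
+; which must be widened rather than scalarized.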
+
+; FIXME: Currently an XTN is generated for v1i32, but it could be optimized
+; away, just as no XTN is generated for v1i16 and v1i8.
+
+define <2 x i32> @test_v1i32_0(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i32_0:
+; CHECK: xtn v0.2s, v0.2d
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <2 x i32> <i32 0, i32 undef>
+ %2 = trunc <2 x i64> %1 to <2 x i32>
+ ret <2 x i32> %2
+}
+
+define <2 x i32> @test_v1i32_1(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i32_1:
+; CHECK: xtn v0.2s, v0.2d
+; CHECK-NEXT: dup v0.2s, v0.s[0]
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <2 x i32> <i32 undef, i32 0>
+ %2 = trunc <2 x i64> %1 to <2 x i32>
+ ret <2 x i32> %2
+}
+
+define <4 x i16> @test_v1i16_0(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i16_0:
+; CHECK-NOT: xtn
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
+ %2 = trunc <4 x i64> %1 to <4 x i16>
+ ret <4 x i16> %2
+}
+
+define <4 x i16> @test_v1i16_1(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i16_1:
+; CHECK-NOT: xtn
+; CHECK: dup v0.4h, v0.h[0]
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
+ %2 = trunc <4 x i64> %1 to <4 x i16>
+ ret <4 x i16> %2
+}
+
+define <8 x i8> @test_v1i8_0(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i8_0:
+; CHECK-NOT: xtn
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = trunc <8 x i64> %1 to <8 x i8>
+ ret <8 x i8> %2
+}
+
+define <8 x i8> @test_v1i8_1(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i8_1:
+; CHECK-NOT: xtn
+; CHECK: dup v0.8b, v0.b[0]
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = trunc <8 x i64> %1 to <8 x i8>
+ ret <8 x i8> %2
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/tst-br.ll b/test/CodeGen/AArch64/tst-br.ll
index 8a2fe26..5dc7b5d 100644
--- a/test/CodeGen/AArch64/tst-br.ll
+++ b/test/CodeGen/AArch64/tst-br.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-atomic-cfg-tidy=0 | FileCheck %s
; We've got the usual issues with LLVM reordering blocks here. The
; tests are correct for the current order, but who knows when that
diff --git a/test/CodeGen/ARM/2009-11-02-NegativeLane.ll b/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
index ca5ae8b..2597b41 100644
--- a/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
+++ b/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=cortex-a8 < %s | FileCheck %s
+; RUN: llc -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "armv7-eabi"
diff --git a/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll b/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
index 4fb2be0..38eb0ea 100644
--- a/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
+++ b/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=cortex-a8 -mattr=-neonfp < %s | FileCheck %s
+; RUN: llc -mcpu=cortex-a8 -mattr=-neonfp -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
; PR5423
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll b/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
index 35995b7..b040b2d 100644
--- a/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
+++ b/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
@@ -4,22 +4,26 @@
%struct.foo = type { i64, i64 }
-define zeroext i8 @t(%struct.foo* %this) noreturn optsize {
+define zeroext i8 @t(%struct.foo* %this, i1 %tst) noreturn optsize {
entry:
; ARM-LABEL: t:
-; ARM: str r2, [r1], r0
+; ARM-DAG: mov r[[ADDR:[0-9]+]], #8
+; ARM-DAG: mov [[VAL:r[0-9]+]], #0
+; ARM: str [[VAL]], [r[[ADDR]]], r0
; THUMB-LABEL: t:
-; THUMB-NOT: str r0, [r1], r0
-; THUMB: str r1, [r0]
+; THUMB-DAG: movs r[[ADDR:[0-9]+]], #8
+; THUMB-DAG: movs [[VAL:r[0-9]+]], #0
+; THUMB-NOT: str {{[a-z0-9]+}}, [{{[a-z0-9]+}}], {{[a-z0-9]+}}
+; THUMB: str [[VAL]], [r[[ADDR]]]
%0 = getelementptr inbounds %struct.foo* %this, i32 0, i32 1 ; <i64*> [#uses=1]
store i32 0, i32* inttoptr (i32 8 to i32*), align 8
- br i1 undef, label %bb.nph96, label %bb3
+ br i1 %tst, label %bb.nph96, label %bb3
bb3: ; preds = %entry
%1 = load i64* %0, align 4 ; <i64> [#uses=0]
- unreachable
+ ret i8 42
bb.nph96: ; preds = %entry
- unreachable
+ ret i8 3
}
diff --git a/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll b/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll
index 32d350e9..e7e0580 100644
--- a/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll
+++ b/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=arm1136jf-s | FileCheck %s
+; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=arm1136jf-s -arm-atomic-cfg-tidy=0 | FileCheck %s
; Radar 8589805: Counting the number of microcoded operations, such as for an
; LDM instruction, was causing an assertion failure because the microop count
; was being treated as an instruction count.
@@ -11,7 +11,7 @@
define i32 @test(i32 %x) {
entry:
%0 = tail call signext i16 undef(i32* undef)
- switch i32 undef, label %bb3 [
+ switch i32 %x, label %bb3 [
i32 0, label %bb4
i32 1, label %bb1
i32 2, label %bb2
diff --git a/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll b/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll
index 85a1137..3950c9e 100644
--- a/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll
+++ b/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -O3 -mtriple=armv6-apple-darwin -relocation-model=pic -mcpu=arm1136jf-s | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -O3 -mtriple=armv6-apple-darwin -relocation-model=pic -mcpu=arm1136jf-s -arm-atomic-cfg-tidy=0 | FileCheck %s
; rdar://8959122 illegal register operands for UMULL instruction
; in cfrac nightly test.
; Armv6 generates a umull that must write to two distinct destination regs.
@@ -7,7 +7,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:64-n32"
target triple = "armv6-apple-darwin10"
-define void @ptoa() nounwind {
+define void @ptoa(i1 %tst, i8* %p8, i8 %val8) nounwind {
entry:
br i1 false, label %bb3, label %bb
@@ -16,7 +16,7 @@ bb: ; preds = %entry
bb3: ; preds = %bb, %entry
%0 = call noalias i8* @malloc() nounwind
- br i1 undef, label %bb46, label %bb8
+ br i1 %tst, label %bb46, label %bb8
bb8: ; preds = %bb3
%1 = getelementptr inbounds i8* %0, i32 0
@@ -35,7 +35,7 @@ bb8: ; preds = %bb3
%7 = or i8 %6, 48
%8 = add i8 %6, 87
%iftmp.5.0.1 = select i1 %5, i8 %7, i8 %8
- store i8 %iftmp.5.0.1, i8* undef, align 1
+ store i8 %iftmp.5.0.1, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -49,7 +49,7 @@ bb8: ; preds = %bb3
%13 = or i8 %12, 48
%14 = add i8 %12, 87
%iftmp.5.0.2 = select i1 %11, i8 %13, i8 %14
- store i8 %iftmp.5.0.2, i8* undef, align 1
+ store i8 %iftmp.5.0.2, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -73,8 +73,8 @@ bb8: ; preds = %bb3
%21 = udiv i32 %2, 100000
%22 = urem i32 %21, 10
%23 = icmp ult i32 %22, 10
- %iftmp.5.0.5 = select i1 %23, i8 0, i8 undef
- store i8 %iftmp.5.0.5, i8* undef, align 1
+ %iftmp.5.0.5 = select i1 %23, i8 0, i8 %val8
+ store i8 %iftmp.5.0.5, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -88,7 +88,7 @@ bb8: ; preds = %bb3
%28 = or i8 %27, 48
%29 = add i8 %27, 87
%iftmp.5.0.6 = select i1 %26, i8 %28, i8 %29
- store i8 %iftmp.5.0.6, i8* undef, align 1
+ store i8 %iftmp.5.0.6, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -102,7 +102,7 @@ bb8: ; preds = %bb3
%34 = or i8 %33, 48
%35 = add i8 %33, 87
%iftmp.5.0.7 = select i1 %32, i8 %34, i8 %35
- store i8 %iftmp.5.0.7, i8* undef, align 1
+ store i8 %iftmp.5.0.7, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -117,7 +117,7 @@ bb8: ; preds = %bb3
%41 = add i8 %39, 87
%iftmp.5.0.8 = select i1 %38, i8 %40, i8 %41
store i8 %iftmp.5.0.8, i8* null, align 1
- unreachable
+ br label %bb46
bb46: ; preds = %bb3
ret void
diff --git a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
index bc72e12..837feb6 100644
--- a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
+++ b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
@@ -8,7 +8,7 @@
@oStruct = external global %struct.Outer, align 4
-define void @main() nounwind {
+define void @main(i8 %val8) nounwind {
; CHECK-LABEL: main:
; CHECK-NOT: ldrd
; CHECK: mul
@@ -28,7 +28,7 @@ for.body: ; preds = %_Z14printIsNotZeroi
br i1 %tobool.i14, label %_Z14printIsNotZeroi.exit17, label %if.then.i16
if.then.i16: ; preds = %_Z14printIsNotZeroi.exit
- unreachable
+ ret void
_Z14printIsNotZeroi.exit17: ; preds = %_Z14printIsNotZeroi.exit
br label %_Z14printIsNotZeroi.exit17.for.body_crit_edge
@@ -36,7 +36,7 @@ _Z14printIsNotZeroi.exit17: ; preds = %_Z14printIsNotZeroi
_Z14printIsNotZeroi.exit17.for.body_crit_edge: ; preds = %_Z14printIsNotZeroi.exit17
%b.phi.trans.insert = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %inc, i32 3
%tmp3.pre = load i8* %b.phi.trans.insert, align 1
- %phitmp27 = icmp eq i8 undef, 0
+ %phitmp27 = icmp eq i8 %val8, 0
br label %for.body
for.end: ; preds = %_Z14printIsNotZeroi.exit17
diff --git a/test/CodeGen/ARM/2012-11-14-subs_carry.ll b/test/CodeGen/ARM/2012-11-14-subs_carry.ll
index 8df295a..3308330 100644
--- a/test/CodeGen/ARM/2012-11-14-subs_carry.ll
+++ b/test/CodeGen/ARM/2012-11-14-subs_carry.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
;CHECK-LABEL: foo:
;CHECK: adds
diff --git a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
index 480d087..162f863 100644
--- a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
+++ b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
@@ -42,34 +42,34 @@ UnifiedReturnBlock:
ret i32 %tmp13
}
-define hidden fastcc void @t3(i8** %retaddr) {
+define hidden fastcc void @t3(i8** %retaddr, i1 %tst, i8* %p8) {
; CHECK-LABEL: t3:
; CHECK: Block address taken
; CHECK-NOT: Address of block that was removed by CodeGen
bb:
store i8* blockaddress(@t3, %KBBlockZero_return_1), i8** %retaddr
- br i1 undef, label %bb77, label %bb7.i
+ br i1 %tst, label %bb77, label %bb7.i
bb7.i: ; preds = %bb35
br label %bb2.i
KBBlockZero_return_1: ; preds = %KBBlockZero.exit
- unreachable
+ ret void
KBBlockZero_return_0: ; preds = %KBBlockZero.exit
- unreachable
+ ret void
bb77: ; preds = %bb26, %bb12, %bb
ret void
bb2.i: ; preds = %bb6.i350, %bb7.i
- br i1 undef, label %bb6.i350, label %KBBlockZero.exit
+ br i1 %tst, label %bb6.i350, label %KBBlockZero.exit
bb6.i350: ; preds = %bb2.i
br label %bb2.i
KBBlockZero.exit: ; preds = %bb2.i
- indirectbr i8* undef, [label %KBBlockZero_return_1, label %KBBlockZero_return_0]
+ indirectbr i8* %p8, [label %KBBlockZero_return_1, label %KBBlockZero_return_0]
}
@foo = global i32 ()* null
diff --git a/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll b/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
index a438c1f..05a4ef0 100644
--- a/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
+++ b/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
; ModuleID = 'bugpoint-reduced-simplified.bc'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
target triple = "armv7--linux-gnueabi"
diff --git a/test/CodeGen/ARM/Windows/dllimport.ll b/test/CodeGen/ARM/Windows/dllimport.ll
new file mode 100644
index 0000000..bc737bd
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/dllimport.ll
@@ -0,0 +1,61 @@
+; RUN: llc -mtriple thumbv7-windows -filetype asm -o - %s | FileCheck %s
+
+; ModuleID = 'dllimport.c'
+
+@var = external dllimport global i32
+@ext = external global i32
+declare dllimport arm_aapcs_vfpcc i32 @external()
+declare arm_aapcs_vfpcc i32 @internal()
+
+define arm_aapcs_vfpcc i32 @get_var() {
+ %1 = load i32* @var, align 4
+ ret i32 %1
+}
+
+; CHECK-LABEL: get_var
+; CHECK: movw r0, :lower16:__imp_var
+; CHECK: movt r0, :upper16:__imp_var
+; CHECK: ldr r0, [r0]
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+
+define arm_aapcs_vfpcc i32 @get_ext() {
+ %1 = load i32* @ext, align 4
+ ret i32 %1
+}
+
+; CHECK-LABEL: get_ext
+; CHECK: movw r0, :lower16:ext
+; CHECK: movt r0, :upper16:ext
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+
+define arm_aapcs_vfpcc i32* @get_var_pointer() {
+ ret i32* @var
+}
+
+; CHECK-LABEL: get_var_pointer
+; CHECK: movw r0, :lower16:__imp_var
+; CHECK: movt r0, :upper16:__imp_var
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+
+define arm_aapcs_vfpcc i32 @call_external() {
+ %call = tail call arm_aapcs_vfpcc i32 @external()
+ ret i32 %call
+}
+
+; CHECK-LABEL: call_external
+; CHECK: movw r0, :lower16:__imp_external
+; CHECK: movt r0, :upper16:__imp_external
+; CHECK: ldr r0, [r0]
+; CHECK: bx r0
+
+define arm_aapcs_vfpcc i32 @call_internal() {
+ %call = tail call arm_aapcs_vfpcc i32 @internal()
+ ret i32 %call
+}
+
+; CHECK-LABEL: call_internal
+; CHECK: b internal
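+
+; Note: the dllimport callee is reached indirectly through its import-table
+; slot (the __imp_external pointer loaded above), while the non-dllimport
+; callee is a direct branch.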
+
diff --git a/test/CodeGen/ARM/Windows/global-minsize.ll b/test/CodeGen/ARM/Windows/global-minsize.ll
new file mode 100644
index 0000000..c0be36c
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/global-minsize.ll
@@ -0,0 +1,16 @@
+; RUN: llc -mtriple=thumbv7-windows -filetype asm -o - %s | FileCheck %s
+
+@i = internal global i32 0, align 4
+
+; Function Attrs: minsize
+define arm_aapcs_vfpcc i32* @function() #0 {
+entry:
+ ret i32* @i
+}
+
+attributes #0 = { minsize }
+
+; CHECK: function:
+; CHECK: movw r0, :lower16:i
+; CHECK: movt r0, :upper16:i
+; CHECK: bx lr
diff --git a/test/CodeGen/ARM/Windows/long-calls.ll b/test/CodeGen/ARM/Windows/long-calls.ll
new file mode 100644
index 0000000..e35f414
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/long-calls.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=thumbv7-windows -mcpu=cortex-a9 -arm-long-calls -o - %s \
+; RUN: | FileCheck %s
+
+declare arm_aapcs_vfpcc void @callee()
+
+define arm_aapcs_vfpcc void @caller() nounwind {
+entry:
+ tail call void @callee()
+ ret void
+}
+
+; CHECK-LABEL: caller
+; CHECK: ldr [[REG:r[0-9]+]], [[CPI:.LCPI[_0-9]+]]
+; CHECK: bx [[REG]]
+; CHECK: .align 2
+; CHECK: [[CPI]]:
+; CHECK: .long callee
+
diff --git a/test/CodeGen/ARM/Windows/structors.ll b/test/CodeGen/ARM/Windows/structors.ll
new file mode 100644
index 0000000..a1a9026
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/structors.ll
@@ -0,0 +1,12 @@
+; RUN: llc -mtriple thumbv7-windows-itanium -o - %s | FileCheck %s
+
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @function, i8* null }]
+
+define arm_aapcs_vfpcc void @function() {
+entry:
+ ret void
+}
+
+; CHECK: .section .CRT$XCU,"rd"
+; CHECK: .long function
+
diff --git a/test/CodeGen/ARM/Windows/vla.ll b/test/CodeGen/ARM/Windows/vla.ll
new file mode 100644
index 0000000..56901de
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/vla.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mtriple=thumbv7-windows-itanium -mcpu=cortex-a9 -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-SMALL-CODE
+; RUN: llc -mtriple=thumbv7-windows-itanium -mcpu=cortex-a9 -code-model=large -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-LARGE-CODE
+; RUN: llc -mtriple=thumbv7-windows-msvc -mcpu=cortex-a9 -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-MSVC
+
+define arm_aapcs_vfpcc i8 @function(i32 %sz, i32 %idx) {
+entry:
+ %vla = alloca i8, i32 %sz, align 1
+ %arrayidx = getelementptr inbounds i8* %vla, i32 %idx
+ %0 = load volatile i8* %arrayidx, align 1
+ ret i8 %0
+}
+
+; CHECK-SMALL-CODE: adds [[R4:r[0-9]+]], #7
+; CHECK-SMALL-CODE: bic [[R4]], [[R4]], #7
+; CHECK-SMALL-CODE: lsrs r4, [[R4]], #2
+; CHECK-SMALL-CODE: bl __chkstk
+; CHECK-SMALL-CODE: sub.w sp, sp, r4
+
+; CHECK-LARGE-CODE: adds [[R4:r[0-9]+]], #7
+; CHECK-LARGE-CODE: bic [[R4]], [[R4]], #7
+; CHECK-LARGE-CODE: lsrs r4, [[R4]], #2
+; CHECK-LARGE-CODE: movw [[IP:r[0-9]+]], :lower16:__chkstk
+; CHECK-LARGE-CODE: movt [[IP]], :upper16:__chkstk
+; CHECK-LARGE-CODE: blx [[IP]]
+; CHECK-LARGE-CODE: sub.w sp, sp, r4
+
+; CHECK-MSVC-NOT: __chkstk
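+
+; Note: adds #7 / bic #7 rounds the VLA size up to an 8-byte multiple, and
+; lsrs #2 converts bytes to the 4-byte word count that Windows' __chkstk
+; expects in r4 before the final sub from sp (assuming the documented
+; __chkstk contract).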
+
diff --git a/test/CodeGen/ARM/aliases.ll b/test/CodeGen/ARM/aliases.ll
index 4de305b..f55ae10 100644
--- a/test/CodeGen/ARM/aliases.ll
+++ b/test/CodeGen/ARM/aliases.ll
@@ -29,7 +29,7 @@ define i32 @foo_f() {
@bar_i = alias internal i32* @bar
-@A = alias i64, i32* @bar
+@A = alias bitcast (i32* @bar to i64*)
define i32 @test() {
entry:
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index bf827d6..14eef83 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -1,8 +1,8 @@
-; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck -check-prefix=ARM %s
-; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck -check-prefix=THUMB %s
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - \
+; RUN: llc -mtriple=arm-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=ARM %s
+; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=THUMB %s
+; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - \
; RUN: | FileCheck -check-prefix=T2 %s
-; RUN: llc -mtriple=thumbv8-eabi %s -o - | FileCheck -check-prefix=V8 %s
+; RUN: llc -mtriple=thumbv8-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=V8 %s
; FIXME: The -march=thumb test doesn't change if -disable-peephole is specified.
diff --git a/test/CodeGen/ARM/atomic-64bit.ll b/test/CodeGen/ARM/atomic-64bit.ll
index 9913f30..462c185 100644
--- a/test/CodeGen/ARM/atomic-64bit.ll
+++ b/test/CodeGen/ARM/atomic-64bit.ll
@@ -171,9 +171,10 @@ define i64 @test6(i64* %ptr, i64 %val) {
define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
; CHECK-LABEL: test7:
-; CHECK: dmb {{ish$}}
+; CHECK-DAG: mov [[VAL1LO:r[0-9]+]], r1
+; CHECK-DAG: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK-LE-DAG: eor [[MISMATCH_LO:r[0-9]+]], [[REG1]], r1
+; CHECK-LE-DAG: eor [[MISMATCH_LO:r[0-9]+]], [[REG1]], [[VAL1LO]]
; CHECK-LE-DAG: eor [[MISMATCH_HI:r[0-9]+]], [[REG2]], r2
; CHECK-BE-DAG: eor [[MISMATCH_LO:r[0-9]+]], [[REG2]], r2
; CHECK-BE-DAG: eor [[MISMATCH_HI:r[0-9]+]], [[REG1]], r1
@@ -189,16 +190,17 @@ define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB-LE-DAG: eor.w [[MISMATCH_LO:[a-z0-9]+]], [[REG1]], r2
; CHECK-THUMB-LE-DAG: eor.w [[MISMATCH_HI:[a-z0-9]+]], [[REG2]], r3
-; CHECK-THUMB-BE-DAG: eor.w [[MISMATCH_HI:[a-z0-9]+]], [[REG1]]
-; CHECK-THUMB-BE-DAG: eor.w [[MISMATCH_LO:[a-z0-9]+]], [[REG2]]
-; CHECK-THUMB: orrs [[MISMATCH_HI]], [[MISMATCH_LO]]
+; CHECK-THUMB-BE-DAG: eor.w [[MISMATCH_HI:[a-z0-9]+]], [[REG1]], r2
+; CHECK-THUMB-BE-DAG: eor.w [[MISMATCH_LO:[a-z0-9]+]], [[REG2]], r3
+; CHECK-THUMB-LE: orrs [[MISMATCH_HI]], [[MISMATCH_LO]]
; CHECK-THUMB: bne
; CHECK-THUMB: strexd {{[a-z0-9]+}}, {{[a-z0-9]+}}, {{[a-z0-9]+}}
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
; CHECK-THUMB: dmb {{ish$}}
- %r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst seq_cst
+ %pair = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst seq_cst
+ %r = extractvalue { i64, i1 } %pair, 0
ret i64 %r
}
diff --git a/test/CodeGen/ARM/atomic-cmp.ll b/test/CodeGen/ARM/atomic-cmp.ll
index a473807..629b16d 100644
--- a/test/CodeGen/ARM/atomic-cmp.ll
+++ b/test/CodeGen/ARM/atomic-cmp.ll
@@ -11,5 +11,6 @@ define i8 @t(i8* %a, i8 %b, i8 %c) nounwind {
; T2: ldrexb
; T2: strexb
%tmp0 = cmpxchg i8* %a, i8 %b, i8 %c monotonic monotonic
- ret i8 %tmp0
+ %tmp1 = extractvalue { i8, i1 } %tmp0, 0
+ ret i8 %tmp1
}
diff --git a/test/CodeGen/ARM/atomic-load-store.ll b/test/CodeGen/ARM/atomic-load-store.ll
index 45a263d..49342d2 100644
--- a/test/CodeGen/ARM/atomic-load-store.ll
+++ b/test/CodeGen/ARM/atomic-load-store.ll
@@ -5,13 +5,13 @@
; RUN: llc < %s -mtriple=armv4-apple-ios | FileCheck %s -check-prefix=ARMV4
define void @test1(i32* %ptr, i32 %val1) {
-; ARM: test1
+; ARM-LABEL: test1
; ARM: dmb {{ish$}}
; ARM-NEXT: str
; ARM-NEXT: dmb {{ish$}}
-; THUMBONE: test1
+; THUMBONE-LABEL: test1
; THUMBONE: __sync_lock_test_and_set_4
-; THUMBTWO: test1
+; THUMBTWO-LABEL: test1
; THUMBTWO: dmb {{ish$}}
; THUMBTWO-NEXT: str
; THUMBTWO-NEXT: dmb {{ish$}}
@@ -20,12 +20,12 @@ define void @test1(i32* %ptr, i32 %val1) {
}
define i32 @test2(i32* %ptr) {
-; ARM: test2
+; ARM-LABEL: test2
; ARM: ldr
; ARM-NEXT: dmb {{ish$}}
-; THUMBONE: test2
+; THUMBONE-LABEL: test2
; THUMBONE: __sync_val_compare_and_swap_4
-; THUMBTWO: test2
+; THUMBTWO-LABEL: test2
; THUMBTWO: ldr
; THUMBTWO-NEXT: dmb {{ish$}}
%val = load atomic i32* %ptr seq_cst, align 4
@@ -33,22 +33,35 @@ define i32 @test2(i32* %ptr) {
}
define void @test3(i8* %ptr1, i8* %ptr2) {
-; ARM: test3
+; ARM-LABEL: test3
+; ARM-NOT: dmb
; ARM: ldrb
+; ARM-NOT: dmb
; ARM: strb
-; THUMBTWO: test3
+; ARM-NOT: dmb
+; ARM: bx lr
+
+; THUMBTWO-LABEL: test3
+; THUMBTWO-NOT: dmb
; THUMBTWO: ldrb
+; THUMBTWO-NOT: dmb
; THUMBTWO: strb
-; THUMBONE: test3
+; THUMBTWO-NOT: dmb
+; THUMBTWO: bx lr
+
+; THUMBONE-LABEL: test3
+; THUMBONE-NOT: dmb
; THUMBONE: ldrb
+; THUMBONE-NOT: dmb
; THUMBONE: strb
+; THUMBONE-NOT: dmb
%val = load atomic i8* %ptr1 unordered, align 1
store atomic i8 %val, i8* %ptr2 unordered, align 1
ret void
}
define void @test4(i8* %ptr1, i8* %ptr2) {
-; THUMBONE: test4
+; THUMBONE-LABEL: test4
; THUMBONE: ___sync_val_compare_and_swap_1
; THUMBONE: ___sync_lock_test_and_set_1
%val = load atomic i8* %ptr1 seq_cst, align 1
@@ -57,14 +70,14 @@ define void @test4(i8* %ptr1, i8* %ptr2) {
}
define i64 @test_old_load_64bit(i64* %p) {
-; ARMV4: test_old_load_64bit
+; ARMV4-LABEL: test_old_load_64bit
; ARMV4: ___sync_val_compare_and_swap_8
%1 = load atomic i64* %p seq_cst, align 8
ret i64 %1
}
define void @test_old_store_64bit(i64* %p, i64 %v) {
-; ARMV4: test_old_store_64bit
+; ARMV4-LABEL: test_old_store_64bit
; ARMV4: ___sync_lock_test_and_set_8
store atomic i64 %v, i64* %p seq_cst, align 8
ret void
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index ac8e949..b988242 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -198,7 +198,8 @@ entry:
define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-LABEL: test_cmpxchg_fail_order:
- %oldval = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ %oldval = extractvalue { i32, i1 } %pair, 0
; CHECK: dmb ish
; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
@@ -216,7 +217,8 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
define i32 @test_cmpxchg_fail_order1(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-LABEL: test_cmpxchg_fail_order1:
- %oldval = cmpxchg i32* %addr, i32 %desired, i32 %new acquire acquire
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new acquire acquire
+ %oldval = extractvalue { i32, i1 } %pair, 0
; CHECK-NOT: dmb ish
; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
index a39565e..7072aaa 100644
--- a/test/CodeGen/ARM/atomic-ops-v8.ll
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -1051,7 +1051,8 @@ define void @test_atomic_load_umax_i64(i64 %offset) nounwind {
define i8 @test_atomic_cmpxchg_i8(i8 zeroext %wanted, i8 zeroext %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i8:
- %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+ %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+ %old = extractvalue { i8, i1 } %pair, 0
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
@@ -1077,7 +1078,8 @@ define i8 @test_atomic_cmpxchg_i8(i8 zeroext %wanted, i8 zeroext %new) nounwind
define i16 @test_atomic_cmpxchg_i16(i16 zeroext %wanted, i16 zeroext %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i16:
- %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
+ %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
+ %old = extractvalue { i16, i1 } %pair, 0
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
@@ -1103,7 +1105,8 @@ define i16 @test_atomic_cmpxchg_i16(i16 zeroext %wanted, i16 zeroext %new) nounw
define void @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i32:
- %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
+ %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
+ %old = extractvalue { i32, i1 } %pair, 0
store i32 %old, i32* @var32
; CHECK-NOT: dmb
; CHECK-NOT: mcr
@@ -1130,7 +1133,8 @@ define void @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i64:
- %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
+ %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
+ %old = extractvalue { i64, i1 } %pair, 0
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
diff --git a/test/CodeGen/ARM/big-endian-neon-extend.ll b/test/CodeGen/ARM/big-endian-neon-extend.ll
new file mode 100644
index 0000000..931c6c3
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-neon-extend.ll
@@ -0,0 +1,81 @@
+; RUN: llc < %s -mtriple armeb-eabi -mattr v7,neon -o - | FileCheck %s
+
+define void @vector_ext_2i8_to_2i64( <2 x i8>* %loadaddr, <2 x i64>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i8_to_2i64:
+; CHECK: vld1.16 {[[REG:d[0-9]+]]
+; CHECK: vmov.i64 {{q[0-9]+}}, #0xff
+; CHECK: vrev16.8 [[REG]], [[REG]]
+; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+ %1 = load <2 x i8>* %loadaddr
+ %2 = zext <2 x i8> %1 to <2 x i64>
+ store <2 x i64> %2, <2 x i64>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_2i16_to_2i64( <2 x i16>* %loadaddr, <2 x i64>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i16_to_2i64:
+; CHECK: vld1.32 {[[REG:d[0-9]+]]
+; CHECK: vmov.i64 {{q[0-9]+}}, #0xffff
+; CHECK: vrev32.16 [[REG]], [[REG]]
+; CHECK: vmovl.u16 {{q[0-9]+}}, [[REG]]
+ %1 = load <2 x i16>* %loadaddr
+ %2 = zext <2 x i16> %1 to <2 x i64>
+ store <2 x i64> %2, <2 x i64>* %storeaddr
+ ret void
+}
+
+
+define void @vector_ext_2i8_to_2i32( <2 x i8>* %loadaddr, <2 x i32>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i8_to_2i32:
+; CHECK: vld1.16 {[[REG:d[0-9]+]]
+; CHECK: vrev16.8 [[REG]], [[REG]]
+ %1 = load <2 x i8>* %loadaddr
+ %2 = zext <2 x i8> %1 to <2 x i32>
+ store <2 x i32> %2, <2 x i32>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_2i16_to_2i32( <2 x i16>* %loadaddr, <2 x i32>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i16_to_2i32:
+; CHECK: vld1.32 {[[REG:d[0-9]+]]
+; CHECK: vrev32.16 [[REG]], [[REG]]
+; CHECK: vmovl.u16 {{q[0-9]+}}, [[REG]]
+ %1 = load <2 x i16>* %loadaddr
+ %2 = zext <2 x i16> %1 to <2 x i32>
+ store <2 x i32> %2, <2 x i32>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_2i8_to_2i16( <2 x i8>* %loadaddr, <2 x i16>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i8_to_2i16:
+; CHECK: vld1.16 {[[REG:d[0-9]+]]
+; CHECK: vrev16.8 [[REG]], [[REG]]
+; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+ %1 = load <2 x i8>* %loadaddr
+ %2 = zext <2 x i8> %1 to <2 x i16>
+ store <2 x i16> %2, <2 x i16>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_4i8_to_4i32( <4 x i8>* %loadaddr, <4 x i32>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_4i8_to_4i32:
+; CHECK: vld1.32 {[[REG:d[0-9]+]]
+; CHECK: vrev32.8 [[REG]], [[REG]]
+; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+ %1 = load <4 x i8>* %loadaddr
+ %2 = zext <4 x i8> %1 to <4 x i32>
+ store <4 x i32> %2, <4 x i32>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_4i8_to_4i16( <4 x i8>* %loadaddr, <4 x i16>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_4i8_to_4i16:
+; CHECK: vld1.32 {[[REG:d[0-9]+]]
+; CHECK: vrev32.8 [[REG]], [[REG]]
+; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+ %1 = load <4 x i8>* %loadaddr
+ %2 = zext <4 x i8> %1 to <4 x i16>
+ store <4 x i16> %2, <4 x i16>* %storeaddr
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/big-endian-neon-trunc-store.ll b/test/CodeGen/ARM/big-endian-neon-trunc-store.ll
new file mode 100644
index 0000000..65147ad
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-neon-trunc-store.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple armeb-eabi -mattr v7,neon -o - | FileCheck %s
+
+define void @vector_trunc_store_2i64_to_2i16( <2 x i64>* %loadaddr, <2 x i16>* %storeaddr ) {
+; CHECK-LABEL: vector_trunc_store_2i64_to_2i16:
+; CHECK: vmovn.i64 [[REG:d[0-9]+]]
+; CHECK: vrev32.16 [[REG]], [[REG]]
+; CHECK: vuzp.16 [[REG]], [[REG2:d[0-9]+]]
+; CHECK: vrev32.16 [[REG]], [[REG2]]
+ %1 = load <2 x i64>* %loadaddr
+ %2 = trunc <2 x i64> %1 to <2 x i16>
+ store <2 x i16> %2, <2 x i16>* %storeaddr
+ ret void
+}
+
+define void @vector_trunc_store_4i32_to_4i8( <4 x i32>* %loadaddr, <4 x i8>* %storeaddr ) {
+; CHECK-LABEL: vector_trunc_store_4i32_to_4i8:
+; CHECK: vmovn.i32 [[REG:d[0-9]+]]
+; CHECK: vrev16.8 [[REG]], [[REG]]
+; CHECK: vuzp.8 [[REG]], [[REG2:d[0-9]+]]
+; CHECK: vrev32.8 [[REG]], [[REG2]]
+ %1 = load <4 x i32>* %loadaddr
+ %2 = trunc <4 x i32> %1 to <4 x i8>
+ store <4 x i8> %2, <4 x i8>* %storeaddr
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/big-endian-ret-f64.ll b/test/CodeGen/ARM/big-endian-ret-f64.ll
new file mode 100644
index 0000000..614bfc0
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-ret-f64.ll
@@ -0,0 +1,12 @@
+; RUN: llc -mtriple=armebv7a-eabi %s -O0 -o - | FileCheck %s
+; RUN: llc -mtriple=armebv8a-eabi %s -O0 -o - | FileCheck %s
+
+define double @fn() {
+; CHECK-LABEL: fn
+; CHECK: ldr r0, [sp]
+; CHECK: ldr r1, [sp, #4]
+ %r = alloca double, align 8
+ %1 = load double* %r, align 8
+ ret double %1
+}
+
diff --git a/test/CodeGen/ARM/call-tc.ll b/test/CodeGen/ARM/call-tc.ll
index 40694bf..a35fd74 100644
--- a/test/CodeGen/ARM/call-tc.ll
+++ b/test/CodeGen/ARM/call-tc.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=armv6-apple-ios5.0 -mattr=+vfp2 | FileCheck %s -check-prefix=CHECKV6
-; RUN: llc < %s -mtriple=thumbv7-apple-ios5.0 | FileCheck %s -check-prefix=CHECKT2D
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -relocation-model=pic -mattr=+vfp2 \
+; RUN: llc < %s -mtriple=armv6-apple-ios5.0 -mattr=+vfp2 -arm-atomic-cfg-tidy=0 | FileCheck %s -check-prefix=CHECKV6
+; RUN: llc < %s -mtriple=thumbv7-apple-ios5.0 -arm-atomic-cfg-tidy=0 | FileCheck %s -check-prefix=CHECKT2D
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -relocation-model=pic -mattr=+vfp2 -arm-atomic-cfg-tidy=0 \
; RUN: | FileCheck %s -check-prefix=CHECKELF
; Enable tailcall optimization for iOS 5.0
diff --git a/test/CodeGen/ARM/cmpxchg-idioms.ll b/test/CodeGen/ARM/cmpxchg-idioms.ll
new file mode 100644
index 0000000..fb88575
--- /dev/null
+++ b/test/CodeGen/ARM/cmpxchg-idioms.ll
@@ -0,0 +1,107 @@
+; RUN: llc -mtriple=thumbv7s-apple-ios7.0 -o - %s | FileCheck %s
+
+define i32 @test_return(i32* %p, i32 %oldval, i32 %newval) {
+; CHECK-LABEL: test_return:
+
+; CHECK: dmb ishst
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldrex [[LOADED:r[0-9]+]], [r0]
+; CHECK: cmp [[LOADED]], r1
+; CHECK: bne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: strex [[STATUS:r[0-9]+]], {{r[0-9]+}}, [r0]
+; CHECK: cmp [[STATUS]], #0
+; CHECK: bne [[LOOP]]
+
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: movs r0, #1
+; CHECK: dmb ish
+; CHECK: bx lr
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: movs r0, #0
+; CHECK: dmb ish
+; CHECK: bx lr
+
+ %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %conv = zext i1 %success to i32
+ ret i32 %conv
+}
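+
+; Note: the success flag is materialized as a constant #1/#0 on the two exit
+; paths of the loop, which is why the CHECK-NOTs above forbid a second cmp of
+; the loaded value.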
+
+define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {
+; CHECK-LABEL: test_return_bool:
+
+; CHECK: uxtb [[OLDBYTE:r[0-9]+]], r1
+; CHECK: dmb ishst
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldrexb [[LOADED:r[0-9]+]], [r0]
+; CHECK: cmp [[LOADED]], [[OLDBYTE]]
+; CHECK: bne [[FAIL:LBB[0-9]+_[0-9]+]]
+
+; CHECK: strexb [[STATUS:r[0-9]+]], {{r[0-9]+}}, [r0]
+; CHECK: cmp [[STATUS]], #0
+; CHECK: bne [[LOOP]]
+
+ ; FIXME: this eor is redundant. We need to teach DAG combine to remove it.
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: movs [[TMP:r[0-9]+]], #1
+; CHECK: eor r0, [[TMP]], #1
+; CHECK: bx lr
+
+; CHECK: [[FAIL]]:
+; CHECK: movs [[TMP:r[0-9]+]], #0
+; CHECK: eor r0, [[TMP]], #1
+; CHECK: bx lr
+
+
+ %pair = cmpxchg i8* %value, i8 %oldValue, i8 %newValue acq_rel monotonic
+ %success = extractvalue { i8, i1 } %pair, 1
+ %failure = xor i1 %success, 1
+ ret i1 %failure
+}
+
+define void @test_conditional(i32* %p, i32 %oldval, i32 %newval) {
+; CHECK-LABEL: test_conditional:
+
+; CHECK: dmb ishst
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldrex [[LOADED:r[0-9]+]], [r0]
+; CHECK: cmp [[LOADED]], r1
+; CHECK: bne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: strex [[STATUS:r[0-9]+]], r2, [r0]
+; CHECK: cmp [[STATUS]], #0
+; CHECK: bne [[LOOP]]
+
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: dmb ish
+; CHECK: b.w _bar
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: dmb ish
+; CHECK: b.w _baz
+
+ %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ tail call void @bar() #2
+ br label %end
+
+false:
+ tail call void @baz() #2
+ br label %end
+
+end:
+ ret void
+}
+
+declare void @bar()
+declare void @baz()
diff --git a/test/CodeGen/ARM/cmpxchg-weak.ll b/test/CodeGen/ARM/cmpxchg-weak.ll
new file mode 100644
index 0000000..126e330
--- /dev/null
+++ b/test/CodeGen/ARM/cmpxchg-weak.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
+
+define void @test_cmpxchg_weak(i32 *%addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: test_cmpxchg_weak:
+
+ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ %oldval = extractvalue { i32, i1 } %pair, 0
+; CHECK: dmb ish
+; CHECK: ldrex [[LOADED:r[0-9]+]], [r0]
+; CHECK: cmp [[LOADED]], r1
+; CHECK: strexeq [[SUCCESS:r[0-9]+]], r2, [r0]
+; CHECK: cmpeq [[SUCCESS]], #0
+; CHECK: bne [[DONE:LBB[0-9]+_[0-9]+]]
+; CHECK: dmb ish
+; CHECK: [[DONE]]:
+; CHECK: str r3, [r0]
+; CHECK: bx lr
+
+ store i32 %oldval, i32* %addr
+ ret void
+}
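+
+; Note: because the cmpxchg is weak, a single ldrex/strexeq attempt is emitted
+; with no retry loop; a spurious strex failure may surface as a cmpxchg
+; failure.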
+
+
+define i1 @test_cmpxchg_weak_to_bool(i32, i32 *%addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: test_cmpxchg_weak_to_bool:
+
+ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ %success = extractvalue { i32, i1 } %pair, 1
+
+; CHECK: dmb ish
+; CHECK: mov r0, #0
+; CHECK: ldrex [[LOADED:r[0-9]+]], [r1]
+; CHECK: cmp [[LOADED]], r2
+; CHECK: strexeq [[STATUS:r[0-9]+]], r3, [r1]
+; CHECK: cmpeq [[STATUS]], #0
+; CHECK: bne [[DONE:LBB[0-9]+_[0-9]+]]
+; CHECK: dmb ish
+; CHECK: mov r0, #1
+; CHECK: [[DONE]]:
+; CHECK: bx lr
+
+ ret i1 %success
+}
diff --git a/test/CodeGen/ARM/data-in-code-annotations.ll b/test/CodeGen/ARM/data-in-code-annotations.ll
index da70178..5eb81b2 100644
--- a/test/CodeGen/ARM/data-in-code-annotations.ll
+++ b/test/CodeGen/ARM/data-in-code-annotations.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-apple-darwin -arm-atomic-cfg-tidy=0 | FileCheck %s
define double @f1() nounwind {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/ARM/debug-info-arg.ll b/test/CodeGen/ARM/debug-info-arg.ll
index e8bf3ba..31d0324 100644
--- a/test/CodeGen/ARM/debug-info-arg.ll
+++ b/test/CodeGen/ARM/debug-info-arg.ll
@@ -59,7 +59,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!24 = metadata !{i32 11, i32 81, metadata !1, null}
!25 = metadata !{i32 11, i32 101, metadata !1, null}
!26 = metadata !{i32 12, i32 3, metadata !27, null}
-!27 = metadata !{i32 786443, metadata !1, i32 11, i32 107, metadata !2, i32 0} ; [ DW_TAG_lexical_block ]
+!27 = metadata !{i32 786443, metadata !2, metadata !1, i32 11, i32 107, i32 0} ; [ DW_TAG_lexical_block ]
!28 = metadata !{i32 13, i32 5, metadata !27, null}
!29 = metadata !{i32 14, i32 1, metadata !27, null}
!30 = metadata !{metadata !1}
diff --git a/test/CodeGen/ARM/debug-info-blocks.ll b/test/CodeGen/ARM/debug-info-blocks.ll
index 6cbe4b4..5ad5e59 100644
--- a/test/CodeGen/ARM/debug-info-blocks.ll
+++ b/test/CodeGen/ARM/debug-info-blocks.ll
@@ -231,10 +231,10 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!133 = metadata !{i32 609, i32 175, metadata !23, null}
!134 = metadata !{i32 786689, metadata !23, metadata !"data", metadata !24, i32 67109473, metadata !108, i32 0, null} ; [ DW_TAG_arg_variable ]
!135 = metadata !{i32 609, i32 190, metadata !23, null}
-!136 = metadata !{i32 786688, metadata !23, metadata !"mydata", metadata !24, i32 604, metadata !50, i32 0, null, i64 1, i64 20, i64 2, i64 1, i64 4, i64 2, i64 1, i64 24} ; [ DW_TAG_auto_variable ]
+!136 = metadata !{i32 786688, metadata !23, metadata !"mydata", metadata !24, i32 604, metadata !50, i32 0, null, metadata !163} ; [ DW_TAG_auto_variable ]
!137 = metadata !{i32 604, i32 49, metadata !23, null}
-!138 = metadata !{i32 786688, metadata !23, metadata !"self", metadata !40, i32 604, metadata !90, i32 0, null, i64 1, i64 24} ; [ DW_TAG_auto_variable ]
-!139 = metadata !{i32 786688, metadata !23, metadata !"semi", metadata !24, i32 607, metadata !125, i32 0, null, i64 1, i64 28} ; [ DW_TAG_auto_variable ]
+!138 = metadata !{i32 786688, metadata !23, metadata !"self", metadata !40, i32 604, metadata !90, i32 0, null, metadata !164} ; [ DW_TAG_auto_variable ]
+!139 = metadata !{i32 786688, metadata !23, metadata !"semi", metadata !24, i32 607, metadata !125, i32 0, null, metadata !165} ; [ DW_TAG_auto_variable ]
!140 = metadata !{i32 607, i32 30, metadata !23, null}
!141 = metadata !{i32 610, i32 17, metadata !142, null}
!142 = metadata !{i32 786443, metadata !152, metadata !23, i32 609, i32 200, i32 94} ; [ DW_TAG_lexical_block ]
@@ -258,3 +258,6 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!160 = metadata !{metadata !"header.h", metadata !"/Volumes/Sandbox/llvm"}
!161 = metadata !{metadata !"header2.h", metadata !"/Volumes/Sandbox/llvm"}
!162 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!163 = metadata !{i64 1, i64 20, i64 2, i64 1, i64 4, i64 2, i64 1, i64 24}
+!164 = metadata !{i64 1, i64 24}
+!165 = metadata !{i64 1, i64 28}
diff --git a/test/CodeGen/ARM/fold-stack-adjust.ll b/test/CodeGen/ARM/fold-stack-adjust.ll
index 695a20b..eb0120f 100644
--- a/test/CodeGen/ARM/fold-stack-adjust.ll
+++ b/test/CodeGen/ARM/fold-stack-adjust.ll
@@ -12,11 +12,11 @@ declare void @bar(i8*)
define void @check_simple() minsize {
; CHECK-LABEL: check_simple:
-; CHECK: push {r3, r4, r5, r6, r7, lr}
+; CHECK: push.w {r7, r8, r9, r10, r11, lr}
; CHECK-NOT: sub sp, sp,
; ...
; CHECK-NOT: add sp, sp,
-; CHECK: pop {r0, r1, r2, r3, r7, pc}
+; CHECK: pop.w {r0, r1, r2, r3, r11, pc}
; CHECK-T1-LABEL: check_simple:
; CHECK-T1: push {r3, r4, r5, r6, r7, lr}
@@ -44,11 +44,11 @@ define void @check_simple() minsize {
define void @check_simple_too_big() minsize {
; CHECK-LABEL: check_simple_too_big:
-; CHECK: push {r7, lr}
+; CHECK: push.w {r11, lr}
; CHECK: sub sp,
; ...
; CHECK: add sp,
-; CHECK: pop {r7, pc}
+; CHECK: pop.w {r11, pc}
%var = alloca i8, i32 64
call void @bar(i8* %var)
ret void
@@ -93,11 +93,11 @@ define void @check_vfp_fold() minsize {
; folded in except that doing so would clobber the value being returned.
define i64 @check_no_return_clobber() minsize {
; CHECK-LABEL: check_no_return_clobber:
-; CHECK: push {r1, r2, r3, r4, r5, r6, r7, lr}
+; CHECK: push.w {r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NOT: sub sp,
; ...
; CHECK: add sp, #24
-; CHECK: pop {r7, pc}
+; CHECK: pop.w {r11, pc}
; Just to keep iOS FileCheck within previous function:
; CHECK-IOS-LABEL: check_no_return_clobber:
@@ -176,9 +176,9 @@ define void @test_varsize(...) minsize {
; CHECK-LABEL: test_varsize:
; CHECK: sub sp, #16
-; CHECK: push {r5, r6, r7, lr}
+; CHECK: push.w {r9, r10, r11, lr}
; ...
-; CHECK: pop.w {r2, r3, r7, lr}
+; CHECK: pop.w {r2, r3, r11, lr}
; CHECK: add sp, #16
; CHECK: bx lr
diff --git a/test/CodeGen/ARM/fptoint.ll b/test/CodeGen/ARM/fptoint.ll
index c721756..f50d0b9 100644
--- a/test/CodeGen/ARM/fptoint.ll
+++ b/test/CodeGen/ARM/fptoint.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -arm-atomic-cfg-tidy=0 -mattr=+v6,+vfp2 %s -o - | FileCheck %s
@i = weak global i32 0 ; <i32*> [#uses=2]
@u = weak global i32 0 ; <i32*> [#uses=2]
diff --git a/test/CodeGen/ARM/global-merge-1.ll b/test/CodeGen/ARM/global-merge-1.ll
new file mode 100644
index 0000000..341597e
--- /dev/null
+++ b/test/CodeGen/ARM/global-merge-1.ll
@@ -0,0 +1,85 @@
+; RUN: llc %s -O0 -o - | FileCheck -check-prefix=NO-MERGE %s
+; RUN: llc %s -O0 -o - -enable-global-merge=false | FileCheck -check-prefix=NO-MERGE %s
+; RUN: llc %s -O0 -o - -enable-global-merge=true | FileCheck -check-prefix=NO-MERGE %s
+; RUN: llc %s -O1 -o - | FileCheck -check-prefix=MERGE %s
+; RUN: llc %s -O1 -o - -enable-global-merge=false | FileCheck -check-prefix=NO-MERGE %s
+; RUN: llc %s -O1 -o - -enable-global-merge=true | FileCheck -check-prefix=MERGE %s
+
+; MERGE-NOT: .zerofill __DATA,__bss,_bar,20,2
+; MERGE-NOT: .zerofill __DATA,__bss,_baz,20,2
+; MERGE-NOT: .zerofill __DATA,__bss,_foo,20,2
+; MERGE: .zerofill __DATA,__bss,__MergedGlobals,60,4
+; MERGE-NOT: .zerofill __DATA,__bss,_bar,20,2
+; MERGE-NOT: .zerofill __DATA,__bss,_baz,20,2
+; MERGE-NOT: .zerofill __DATA,__bss,_foo,20,2
+
+; NO-MERGE-NOT: .zerofill __DATA,__bss,__MergedGlobals,60,4
+; NO-MERGE: .zerofill __DATA,__bss,_bar,20,2
+; NO-MERGE: .zerofill __DATA,__bss,_baz,20,2
+; NO-MERGE: .zerofill __DATA,__bss,_foo,20,2
+; NO-MERGE-NOT: .zerofill __DATA,__bss,__MergedGlobals,60,4
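+
+; Note: with merging enabled, the three 20-byte internal arrays are emitted as
+; a single 60-byte __MergedGlobals blob, so one base address reaches all of
+; them; the .zerofill sizes above encode exactly this.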
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+@bar = internal global [5 x i32] zeroinitializer, align 4
+@baz = internal global [5 x i32] zeroinitializer, align 4
+@foo = internal global [5 x i32] zeroinitializer, align 4
+
+; Function Attrs: nounwind ssp
+define internal void @initialize() #0 {
+ %1 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %1, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 0), align 4, !tbaa !1
+ %2 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %2, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 0), align 4, !tbaa !1
+ %3 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %3, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 1), align 4, !tbaa !1
+ %4 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %4, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 1), align 4, !tbaa !1
+ %5 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %5, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 2), align 4, !tbaa !1
+ %6 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %6, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 2), align 4, !tbaa !1
+ %7 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %7, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 3), align 4, !tbaa !1
+ %8 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %8, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 3), align 4, !tbaa !1
+ %9 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %9, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 4), align 4, !tbaa !1
+ %10 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %10, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 4), align 4, !tbaa !1
+ ret void
+}
+
+declare i32 @calc(...) #1
+
+; Function Attrs: nounwind ssp
+define internal void @calculate() #0 {
+ %1 = load <4 x i32>* bitcast ([5 x i32]* @bar to <4 x i32>*), align 4
+ %2 = load <4 x i32>* bitcast ([5 x i32]* @baz to <4 x i32>*), align 4
+ %3 = mul <4 x i32> %2, %1
+ store <4 x i32> %3, <4 x i32>* bitcast ([5 x i32]* @foo to <4 x i32>*), align 4
+ %4 = load i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 4), align 4, !tbaa !1
+ %5 = load i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 4), align 4, !tbaa !1
+ %6 = mul nsw i32 %5, %4
+ store i32 %6, i32* getelementptr inbounds ([5 x i32]* @foo, i32 0, i32 4), align 4, !tbaa !1
+ ret void
+}
+
+; Function Attrs: nounwind readnone ssp
+define internal i32* @returnFoo() #2 {
+ ret i32* getelementptr inbounds ([5 x i32]* @foo, i32 0, i32 0)
+}
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"LLVM version 3.4 "}
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"int", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/ARM/ifcvt-branch-weight.ll b/test/CodeGen/ARM/ifcvt-branch-weight.ll
index cd8a561..a994d3d 100644
--- a/test/CodeGen/ARM/ifcvt-branch-weight.ll
+++ b/test/CodeGen/ARM/ifcvt-branch-weight.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv8 -print-machineinstrs=if-converter -o /dev/null 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8 -print-machineinstrs=if-converter -arm-atomic-cfg-tidy=0 -o /dev/null 2>&1 | FileCheck %s
%struct.S = type { i8* (i8*)*, [1 x i8] }
define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
diff --git a/test/CodeGen/ARM/ifcvt10.ll b/test/CodeGen/ARM/ifcvt10.ll
index 26c7272..509c182 100644
--- a/test/CodeGen/ARM/ifcvt10.ll
+++ b/test/CodeGen/ARM/ifcvt10.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a9 | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -arm-atomic-cfg-tidy=0 -mcpu=cortex-a9 | FileCheck %s
; rdar://8402126
; Make sure if-converter is not predicating vldmia and ldmia. These are
; micro-coded and would have long issue latency even if predicated on
diff --git a/test/CodeGen/ARM/indirectbr-3.ll b/test/CodeGen/ARM/indirectbr-3.ll
index 5a9c459..291fedb 100644
--- a/test/CodeGen/ARM/indirectbr-3.ll
+++ b/test/CodeGen/ARM/indirectbr-3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
; If ARMBaseInstrInfo::AnalyzeBlocks returns the wrong value, which was possible
; for blocks with indirect branches, the IfConverter could end up deleting
diff --git a/test/CodeGen/ARM/interrupt-attr.ll b/test/CodeGen/ARM/interrupt-attr.ll
index c5be667..cb67dd9 100644
--- a/test/CodeGen/ARM/interrupt-attr.ll
+++ b/test/CodeGen/ARM/interrupt-attr.ll
@@ -35,15 +35,15 @@ define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
; Normal AAPCS function (r0-r3 pushed onto stack by hardware, lr set to
; appropriate sentinel so no special return needed).
; CHECK-M-LABEL: irq_fn:
-; CHECK-M: push {r4, r6, r7, lr}
-; CHECK-M: add r7, sp, #8
+; CHECK-M: push.w {r4, r10, r11, lr}
+; CHECK-M: add.w r11, sp, #8
; CHECK-M: mov r4, sp
; CHECK-M: bic r4, r4, #7
; CHECK-M: mov sp, r4
; CHECK-M: blx _bar
-; CHECK-M: sub.w r4, r7, #8
+; CHECK-M: sub.w r4, r11, #8
; CHECK-M: mov sp, r4
-; CHECK-M: pop {r4, r6, r7, pc}
+; CHECK-M: pop.w {r4, r10, r11, pc}
call arm_aapcscc void @bar()
ret void
diff --git a/test/CodeGen/ARM/intrinsics-memory-barrier.ll b/test/CodeGen/ARM/intrinsics-memory-barrier.ll
new file mode 100644
index 0000000..5ee0b3e
--- /dev/null
+++ b/test/CodeGen/ARM/intrinsics-memory-barrier.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -mtriple=armv7 -mattr=+db | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7 -mattr=+db | FileCheck %s
+
+; CHECK-LABEL: test
+define void @test() {
+ call void @llvm.arm.dmb(i32 3) ; CHECK: dmb osh
+ call void @llvm.arm.dsb(i32 7) ; CHECK: dsb nsh
+ call void @llvm.arm.isb(i32 15) ; CHECK: isb sy
+ ret void
+}
+
+; The important point is that the compiler must not reorder memory access
+; instructions around the DMB.
+; If it does, the two STRs below will collapse into one STRD.
+; CHECK-LABEL: test_dmb_reordering
+define void @test_dmb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}]
+
+ call void @llvm.arm.dmb(i32 15) ; CHECK: dmb sy
+
+ %d1 = getelementptr i32* %d, i32 1
+ store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
+
+ ret void
+}
+
+; Similarly for DSB.
+; CHECK-LABEL: test_dsb_reordering
+define void @test_dsb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}]
+
+ call void @llvm.arm.dsb(i32 15) ; CHECK: dsb sy
+
+ %d1 = getelementptr i32* %d, i32 1
+ store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
+
+ ret void
+}
+
+; And ISB.
+; CHECK-LABEL: test_isb_reordering
+define void @test_isb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}]
+
+ call void @llvm.arm.isb(i32 15) ; CHECK: isb sy
+
+ %d1 = getelementptr i32* %d, i32 1
+ store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
+
+ ret void
+}
+
+declare void @llvm.arm.dmb(i32)
+declare void @llvm.arm.dsb(i32)
+declare void @llvm.arm.isb(i32)
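+
+; For reference, the intrinsic immediate selects the barrier domain (values
+; assumed from the ARM barrier-option encoding, consistent with the CHECK
+; lines above): 15 = sy (full system), 7 = nsh (non-shareable),
+; 3 = osh (outer shareable).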
diff --git a/test/CodeGen/ARM/jump_tables.ll b/test/CodeGen/ARM/jump_tables.ll
new file mode 100644
index 0000000..907a86c
--- /dev/null
+++ b/test/CodeGen/ARM/jump_tables.ll
@@ -0,0 +1,32 @@
+; RUN: llc <%s -mtriple=arm-unknown-linux-gnueabi -jump-table-type=single | FileCheck --check-prefix=ARM %s
+; RUN: llc <%s -mtriple=thumb-unknown-linux-gnueabi -jump-table-type=single | FileCheck --check-prefix=THUMB %s
+
+define void @indirect_fun() unnamed_addr jumptable {
+ ret void
+}
+define void ()* @get_fun() {
+ ret void ()* @indirect_fun
+
+; ARM: ldr r0, [[LABEL:.*]]
+; ARM: mov pc, lr
+; ARM: [[LABEL]]:
+; ARM: .long __llvm_jump_instr_table_0_1
+
+; THUMB: ldr r0, [[LABEL:.*]]
+; THUMB: bx lr
+; THUMB: [[LABEL]]:
+; THUMB: .long __llvm_jump_instr_table_0_1
+}
+
+; ARM: .globl __llvm_jump_instr_table_0_1
+; ARM: .align 3
+; ARM: .type __llvm_jump_instr_table_0_1,%function
+; ARM: __llvm_jump_instr_table_0_1:
+; ARM: b indirect_fun(PLT)
+
+; THUMB: .globl __llvm_jump_instr_table_0_1
+; THUMB: .align 3
+; THUMB: .thumb_func
+; THUMB: .type __llvm_jump_instr_table_0_1,%function
+; THUMB: __llvm_jump_instr_table_0_1:
+; THUMB: b indirect_fun(PLT)
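+
+; (With -jump-table-type=single, all jumptable functions share one table of
+; branch stubs; taking the address of @indirect_fun yields its table entry,
+; __llvm_jump_instr_table_0_1, as checked above.)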
diff --git a/test/CodeGen/ARM/ldstrex-m.ll b/test/CodeGen/ARM/ldstrex-m.ll
new file mode 100644
index 0000000..b50699f
--- /dev/null
+++ b/test/CodeGen/ARM/ldstrex-m.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -mtriple=thumbv7m-none-eabi -mcpu=cortex-m4 | FileCheck %s
+
+; CHECK-LABEL: f0:
+; CHECK-NOT: ldrexd
+define i64 @f0(i64* %p) nounwind readonly {
+entry:
+ %0 = load atomic i64* %p seq_cst, align 8
+ ret i64 %0
+}
+
+; CHECK-LABEL: f1:
+; CHECK-NOT: strexd
+define void @f1(i64* %p) nounwind readonly {
+entry:
+ store atomic i64 0, i64* %p seq_cst, align 8
+ ret void
+}
+
+; CHECK-LABEL: f2:
+; CHECK-NOT: ldrexd
+; CHECK-NOT: strexd
+define i64 @f2(i64* %p) nounwind readonly {
+entry:
+ %0 = atomicrmw add i64* %p, i64 1 seq_cst
+ ret i64 %0
+}
+
+; CHECK-LABEL: f3:
+; CHECK: ldr
+define i32 @f3(i32* %p) nounwind readonly {
+entry:
+ %0 = load atomic i32* %p seq_cst, align 4
+ ret i32 %0
+}
+
+; CHECK-LABEL: f4:
+; CHECK: ldrb
+define i8 @f4(i8* %p) nounwind readonly {
+entry:
+ %0 = load atomic i8* %p seq_cst, align 4
+ ret i8 %0
+}
+
+; CHECK-LABEL: f5:
+; CHECK: str
+define void @f5(i32* %p) nounwind readonly {
+entry:
+ store atomic i32 0, i32* %p seq_cst, align 4
+ ret void
+}
+
+; CHECK-LABEL: f6:
+; CHECK: ldrex
+; CHECK: strex
+define i32 @f6(i32* %p) nounwind readonly {
+entry:
+ %0 = atomicrmw add i32* %p, i32 1 seq_cst
+ ret i32 %0
+}
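+
+; v7-M has no ldrexd/strexd, so the i64 operations above must be expanded to
+; library calls rather than inline exclusive pairs (hence the CHECK-NOTs),
+; while word-sized and narrower operations can still use ldrex/strex inline.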
diff --git a/test/CodeGen/ARM/lit.local.cfg b/test/CodeGen/ARM/lit.local.cfg
index 8a3ba96..98c6700 100644
--- a/test/CodeGen/ARM/lit.local.cfg
+++ b/test/CodeGen/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/ARM/lsr-unfolded-offset.ll b/test/CodeGen/ARM/lsr-unfolded-offset.ll
index 1dafa00..3ad60d4 100644
--- a/test/CodeGen/ARM/lsr-unfolded-offset.ll
+++ b/test/CodeGen/ARM/lsr-unfolded-offset.ll
@@ -1,4 +1,4 @@
-; RUN: llc -regalloc=greedy < %s | FileCheck %s
+; RUN: llc -regalloc=greedy -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
; LSR shouldn't introduce more induction variables than needed, increasing
; register pressure and therefore spilling. There is more room for improvement
diff --git a/test/CodeGen/ARM/metadata-default.ll b/test/CodeGen/ARM/metadata-default.ll
new file mode 100644
index 0000000..f6a3fe2
--- /dev/null
+++ b/test/CodeGen/ARM/metadata-default.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7--none-eabi"
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = metadata !{i32 1, metadata !"wchar_size", i32 4}
+!1 = metadata !{i32 1, metadata !"min_enum_size", i32 4}
+
+; CHECK: .eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
+; CHECK: .eabi_attribute 26, 2 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/metadata-short-enums.ll b/test/CodeGen/ARM/metadata-short-enums.ll
new file mode 100644
index 0000000..bccd332
--- /dev/null
+++ b/test/CodeGen/ARM/metadata-short-enums.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7--none-eabi"
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = metadata !{i32 1, metadata !"wchar_size", i32 4}
+!1 = metadata !{i32 1, metadata !"min_enum_size", i32 1}
+
+; CHECK: .eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
+; CHECK: .eabi_attribute 26, 1 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/metadata-short-wchar.ll b/test/CodeGen/ARM/metadata-short-wchar.ll
new file mode 100644
index 0000000..6de9bf1
--- /dev/null
+++ b/test/CodeGen/ARM/metadata-short-wchar.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7--none-eabi"
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = metadata !{i32 1, metadata !"wchar_size", i32 2}
+!1 = metadata !{i32 1, metadata !"min_enum_size", i32 4}
+
+; CHECK: .eabi_attribute 18, 2 @ Tag_ABI_PCS_wchar_t
+; CHECK: .eabi_attribute 26, 2 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/misched-copy-arm.ll b/test/CodeGen/ARM/misched-copy-arm.ll
index 26adf0c..bb2d42c 100644
--- a/test/CodeGen/ARM/misched-copy-arm.ll
+++ b/test/CodeGen/ARM/misched-copy-arm.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc -mtriple=thumb-eabi -mcpu=swift -pre-RA-sched=source -join-globalcopies -enable-misched -verify-misched -debug-only=misched %s -o - 2>&1 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=swift -pre-RA-sched=source -join-globalcopies -enable-misched -verify-misched -debug-only=misched -arm-atomic-cfg-tidy=0 %s -o - 2>&1 | FileCheck %s
;
; Loop counter copies should be eliminated.
; There is also a MUL here, but we don't care where it is scheduled.
diff --git a/test/CodeGen/ARM/none-macho.ll b/test/CodeGen/ARM/none-macho.ll
index 2795b8c..60c2171 100644
--- a/test/CodeGen/ARM/none-macho.ll
+++ b/test/CodeGen/ARM/none-macho.ll
@@ -48,8 +48,8 @@ define i32 @test_frame_ptr() {
; CHECK-LABEL: test_frame_ptr:
call void @test_trap()
- ; Frame pointer is r7 as for Darwin
-; CHECK: mov r7, sp
+ ; Frame pointer is r11.
+; CHECK: mov r11, sp
ret i32 42
}
@@ -63,11 +63,9 @@ define void @test_two_areas(%big_arr* %addr) {
; This goes with the choice of r7 as FP (largely). FP and LR have to be stored
; consecutively on the stack for the frame record to be valid, which means we
; need the 2 register-save areas employed by iOS.
-; CHECK-NON-FAST: push {r4, r5, r6, r7, lr}
-; CHECK-NON-FAST: push.w {r8, r9, r10, r11}
+; CHECK-NON-FAST: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; ...
-; CHECK-NON-FAST: pop.w {r8, r9, r10, r11}
-; CHECK-NON-FAST: pop {r4, r5, r6, r7, pc}
+; CHECK-NON-FAST: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
ret void
}
diff --git a/test/CodeGen/ARM/null-streamer.ll b/test/CodeGen/ARM/null-streamer.ll
new file mode 100644
index 0000000..350c45e
--- /dev/null
+++ b/test/CodeGen/ARM/null-streamer.ll
@@ -0,0 +1,7 @@
+; Test the null streamer with a target streamer.
+; RUN: llc -O0 -filetype=null -mtriple=arm-linux < %s
+
+define i32 @main() {
+entry:
+ ret i32 0
+}
diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll
index b245674..feed5ad 100644
--- a/test/CodeGen/ARM/reg_sequence.ll
+++ b/test/CodeGen/ARM/reg_sequence.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -regalloc=basic | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 -regalloc=basic | FileCheck %s
; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's.
%struct.int16x8_t = type { <8 x i16> }
diff --git a/test/CodeGen/ARM/spill-q.ll b/test/CodeGen/ARM/spill-q.ll
index b924663..4fa97ea 100644
--- a/test/CodeGen/ARM/spill-q.ll
+++ b/test/CodeGen/ARM/spill-q.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv7-elf -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-elf -mattr=+neon -arm-atomic-cfg-tidy=0 | FileCheck %s
; PR4789
%bar = type { float, float, float }
diff --git a/test/CodeGen/ARM/struct-byval-frame-index.ll b/test/CodeGen/ARM/struct-byval-frame-index.ll
index 465ee12..0fd55ec 100644
--- a/test/CodeGen/ARM/struct-byval-frame-index.ll
+++ b/test/CodeGen/ARM/struct-byval-frame-index.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=cortex-a15 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=cortex-a15 -verify-machineinstrs -arm-atomic-cfg-tidy=0 | FileCheck %s
; Check a spill right after a function call with large struct byval is correctly
; generated.
diff --git a/test/CodeGen/ARM/twoaddrinstr.ll b/test/CodeGen/ARM/twoaddrinstr.ll
index 8da875f..01df3b4 100644
--- a/test/CodeGen/ARM/twoaddrinstr.ll
+++ b/test/CodeGen/ARM/twoaddrinstr.ll
@@ -1,5 +1,5 @@
; Tests for the two-address instruction pass.
-; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 -arm-atomic-cfg-tidy=0 %s -o - | FileCheck %s
define void @PR13378() nounwind {
 ; This was originally a crasher trying to schedule the instructions.
diff --git a/test/CodeGen/ARM/va_arg.ll b/test/CodeGen/ARM/va_arg.ll
index f18b498..d901a74 100644
--- a/test/CodeGen/ARM/va_arg.ll
+++ b/test/CodeGen/ARM/va_arg.ll
@@ -24,13 +24,13 @@ entry:
; CHECK-NOT: bfc
; CHECK: bx lr
-define double @test2(i32 %a, i32 %b, ...) nounwind optsize {
+define double @test2(i32 %a, i32* %b, ...) nounwind optsize {
entry:
%ap = alloca i8*, align 4 ; <i8**> [#uses=3]
%ap1 = bitcast i8** %ap to i8* ; <i8*> [#uses=2]
call void @llvm.va_start(i8* %ap1)
%0 = va_arg i8** %ap, i32 ; <i32> [#uses=0]
- store i32 %0, i32* undef
+ store i32 %0, i32* %b
%1 = va_arg i8** %ap, double ; <double> [#uses=1]
call void @llvm.va_end(i8* %ap1)
ret double %1
diff --git a/test/CodeGen/ARM/vldm-sched-a9.ll b/test/CodeGen/ARM/vldm-sched-a9.ll
index d0a9ac6..f2e5eb9 100644
--- a/test/CodeGen/ARM/vldm-sched-a9.ll
+++ b/test/CodeGen/ARM/vldm-sched-a9.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mtriple=armv7-linux-gnueabihf -float-abi=hard -mcpu=cortex-a9 -O3 | FileCheck %s
+; RUN: llc < %s -march=arm -mtriple=armv7-linux-gnueabihf -arm-atomic-cfg-tidy=0 -float-abi=hard -mcpu=cortex-a9 -O3 | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32-S64"
diff --git a/test/CodeGen/ARM/widen-vmovs.ll b/test/CodeGen/ARM/widen-vmovs.ll
index 1efbc73..316cfab 100644
--- a/test/CodeGen/ARM/widen-vmovs.ll
+++ b/test/CodeGen/ARM/widen-vmovs.ll
@@ -17,7 +17,7 @@ target triple = "thumbv7-apple-ios"
; - Register liveness is verified.
; - The execution domain switch to vorr works across basic blocks.
-define void @Mm() nounwind {
+define void @Mm(i32 %in, float* %addr) nounwind {
entry:
br label %for.body4
@@ -27,10 +27,10 @@ for.body4:
for.body.i:
%tmp3.i = phi float [ 1.000000e+10, %for.body4 ], [ %add.i, %for.body.i ]
%add.i = fadd float %tmp3.i, 1.000000e+10
- %exitcond.i = icmp eq i32 undef, 41
+ %exitcond.i = icmp eq i32 %in, 41
br i1 %exitcond.i, label %rInnerproduct.exit, label %for.body.i
rInnerproduct.exit:
- store float %add.i, float* undef, align 4
+ store float %add.i, float* %addr, align 4
br label %for.body4
}
diff --git a/test/CodeGen/CPP/atomic.ll b/test/CodeGen/CPP/atomic.ll
new file mode 100644
index 0000000..e79c45d
--- /dev/null
+++ b/test/CodeGen/CPP/atomic.ll
@@ -0,0 +1,89 @@
+; RUN: llc -march=cpp -o - %s | FileCheck %s
+
+define void @test_atomicrmw(i32* %addr, i32 %inc) {
+ %inst0 = atomicrmw xchg i32* %addr, i32 %inc seq_cst
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Xchg, {{.*}}, SequentiallyConsistent, CrossThread
+ ; CHECK: [[INST]]->setName("inst0");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst1 = atomicrmw add i32* %addr, i32 %inc seq_cst
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Add, {{.*}}, SequentiallyConsistent, CrossThread
+ ; CHECK: [[INST]]->setName("inst1");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst2 = atomicrmw volatile sub i32* %addr, i32 %inc singlethread monotonic
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Sub, {{.*}}, Monotonic, SingleThread
+ ; CHECK: [[INST]]->setName("inst2");
+ ; CHECK: [[INST]]->setVolatile(true);
+
+ %inst3 = atomicrmw and i32* %addr, i32 %inc acq_rel
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::And, {{.*}}, AcquireRelease, CrossThread
+ ; CHECK: [[INST]]->setName("inst3");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst4 = atomicrmw nand i32* %addr, i32 %inc release
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Nand, {{.*}}, Release, CrossThread
+ ; CHECK: [[INST]]->setName("inst4");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst5 = atomicrmw volatile or i32* %addr, i32 %inc singlethread seq_cst
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Or, {{.*}}, SequentiallyConsistent, SingleThread
+ ; CHECK: [[INST]]->setName("inst5");
+ ; CHECK: [[INST]]->setVolatile(true);
+
+ %inst6 = atomicrmw xor i32* %addr, i32 %inc release
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Xor, {{.*}}, Release, CrossThread
+ ; CHECK: [[INST]]->setName("inst6");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst7 = atomicrmw volatile max i32* %addr, i32 %inc singlethread monotonic
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Max, {{.*}}, Monotonic, SingleThread
+ ; CHECK: [[INST]]->setName("inst7");
+ ; CHECK: [[INST]]->setVolatile(true);
+
+ %inst8 = atomicrmw min i32* %addr, i32 %inc acquire
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Min, {{.*}}, Acquire, CrossThread
+ ; CHECK: [[INST]]->setName("inst8");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst9 = atomicrmw volatile umax i32* %addr, i32 %inc monotonic
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::UMax, {{.*}}, Monotonic, CrossThread
+ ; CHECK: [[INST]]->setName("inst9");
+ ; CHECK: [[INST]]->setVolatile(true);
+
+ %inst10 = atomicrmw umin i32* %addr, i32 %inc singlethread release
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::UMin, {{.*}}, Release, SingleThread
+ ; CHECK: [[INST]]->setName("inst10");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+
+ ret void
+}
+
+define void @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
+ %inst0 = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ ; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, SequentiallyConsistent, Monotonic, CrossThread
+ ; CHECK: [[INST]]->setName("inst0");
+ ; CHECK: [[INST]]->setVolatile(false);
+ ; CHECK: [[INST]]->setWeak(false);
+
+ %inst1 = cmpxchg volatile i32* %addr, i32 %desired, i32 %new singlethread acq_rel acquire
+ ; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, AcquireRelease, Acquire, SingleThread
+ ; CHECK: [[INST]]->setName("inst1");
+ ; CHECK: [[INST]]->setVolatile(true);
+ ; CHECK: [[INST]]->setWeak(false);
+
+ %inst2 = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ ; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, SequentiallyConsistent, Monotonic, CrossThread
+ ; CHECK: [[INST]]->setName("inst2");
+ ; CHECK: [[INST]]->setVolatile(false);
+ ; CHECK: [[INST]]->setWeak(true);
+
+ %inst3 = cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread acq_rel acquire
+ ; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, AcquireRelease, Acquire, SingleThread
+ ; CHECK: [[INST]]->setName("inst3");
+ ; CHECK: [[INST]]->setVolatile(true);
+ ; CHECK: [[INST]]->setWeak(true);
+
+ ret void
+}
diff --git a/test/CodeGen/CPP/lit.local.cfg b/test/CodeGen/CPP/lit.local.cfg
index 4063dd1..3ff5c6b 100644
--- a/test/CodeGen/CPP/lit.local.cfg
+++ b/test/CodeGen/CPP/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'CppBackend' in targets:
+if not 'CppBackend' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Generic/MachineBranchProb.ll b/test/CodeGen/Generic/MachineBranchProb.ll
index 802ee2c..0e98280 100644
--- a/test/CodeGen/Generic/MachineBranchProb.ll
+++ b/test/CodeGen/Generic/MachineBranchProb.ll
@@ -1,5 +1,8 @@
; RUN: llc < %s -print-machineinstrs=expand-isel-pseudos -o /dev/null 2>&1 | FileCheck %s
+; ARM & AArch64 run an extra SimplifyCFG which disrupts this test.
+; XFAIL: arm,aarch64
+
; Make sure we have the correct weight attached to each successor.
define i32 @test2(i32 %x) nounwind uwtable readnone ssp {
; CHECK: Machine code for function test2:
diff --git a/test/CodeGen/Generic/select.ll b/test/CodeGen/Generic/select.ll
index 77636eb..c4841b7 100644
--- a/test/CodeGen/Generic/select.ll
+++ b/test/CodeGen/Generic/select.ll
@@ -192,4 +192,3 @@ define <1 x i32> @checkScalariseVSELECT(<1 x i32> %a, <1 x i32> %b) {
%s = select <1 x i1> %cond, <1 x i32> %a, <1 x i32> %b
ret <1 x i32> %s
}
-
diff --git a/test/CodeGen/Generic/stop-after.ll b/test/CodeGen/Generic/stop-after.ll
index 557e097..5e0e350 100644
--- a/test/CodeGen/Generic/stop-after.ll
+++ b/test/CodeGen/Generic/stop-after.ll
@@ -5,6 +5,6 @@
; STOP: Loop Strength Reduction
; STOP-NEXT: Machine Function Analysis
-; START: -machine-branch-prob -gc-lowering
+; START: -machine-branch-prob -jump-instr-tables -gc-lowering
; START: FunctionPass Manager
; START-NEXT: Lower Garbage Collection Instructions
diff --git a/test/CodeGen/Hexagon/lit.local.cfg b/test/CodeGen/Hexagon/lit.local.cfg
index e96bab8..ba72ff6 100644
--- a/test/CodeGen/Hexagon/lit.local.cfg
+++ b/test/CodeGen/Hexagon/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Hexagon' in targets:
+if not 'Hexagon' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/MSP430/lit.local.cfg b/test/CodeGen/MSP430/lit.local.cfg
index a18fe6f..b1cf1fb 100644
--- a/test/CodeGen/MSP430/lit.local.cfg
+++ b/test/CodeGen/MSP430/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'MSP430' in targets:
+if not 'MSP430' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Mips/2008-08-01-AsmInline.ll b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
index e274bc0..3c1bb39 100644
--- a/test/CodeGen/Mips/2008-08-01-AsmInline.ll
+++ b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips < %s | FileCheck %s
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s
; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
%struct.DWstruct = type { i32, i32 }
diff --git a/test/CodeGen/Mips/2013-11-18-fp64-const0.ll b/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
index f8390d9..6a210a0 100644
--- a/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
+++ b/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=mips -mattr=-fp64 < %s | FileCheck -check-prefix=CHECK-FP32 %s
-; RUN: llc -march=mips -mattr=+fp64 < %s | FileCheck -check-prefix=CHECK-FP64 %s
+; RUN: llc -march=mips -mcpu=mips32r2 -mattr=+fp64 < %s | FileCheck -check-prefix=CHECK-FP64 %s
; This test case is a simplified version of an llvm-stress generated test with
; seed=3718491962.
diff --git a/test/CodeGen/Mips/Fast-ISel/loadstore2.ll b/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
new file mode 100644
index 0000000..f113a0e
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
@@ -0,0 +1,83 @@
+; ModuleID = 'loadstore2.c'
+target datalayout = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@c2 = common global i8 0, align 1
+@c1 = common global i8 0, align 1
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+
+@s2 = common global i16 0, align 2
+@s1 = common global i16 0, align 2
+@i2 = common global i32 0, align 4
+@i1 = common global i32 0, align 4
+@f2 = common global float 0.000000e+00, align 4
+@f1 = common global float 0.000000e+00, align 4
+@d2 = common global double 0.000000e+00, align 8
+@d1 = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @cfoo() #0 {
+entry:
+ %0 = load i8* @c2, align 1
+ store i8 %0, i8* @c1, align 1
+; CHECK-LABEL: cfoo:
+; CHECK: lbu $[[REGc:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: sb $[[REGc]], 0(${{[0-9]+}})
+
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @sfoo() #0 {
+entry:
+ %0 = load i16* @s2, align 2
+ store i16 %0, i16* @s1, align 2
+; CHECK-LABEL: sfoo:
+; CHECK: lhu $[[REGs:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: sh $[[REGs]], 0(${{[0-9]+}})
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @ifoo() #0 {
+entry:
+ %0 = load i32* @i2, align 4
+ store i32 %0, i32* @i1, align 4
+; CHECK-LABEL: ifoo:
+; CHECK: lw $[[REGi:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: sw $[[REGi]], 0(${{[0-9]+}})
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @ffoo() #0 {
+entry:
+ %0 = load float* @f2, align 4
+ store float %0, float* @f1, align 4
+; CHECK-LABEL: ffoo:
+; CHECK: lwc1 $f[[REGf:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: swc1 $f[[REGf]], 0(${{[0-9]+}})
+
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @dfoo() #0 {
+entry:
+ %0 = load double* @d2, align 8
+ store double %0, double* @d1, align 8
+; CHECK-LABEL: dfoo:
+; CHECK: ldc1 $f[[REGd:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: sdc1 $f[[REGd]], 0(${{[0-9]+}})
+; CHECK: .end dfoo
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+
diff --git a/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll b/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
new file mode 100644
index 0000000..6759c01
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
@@ -0,0 +1,38 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+
+@f = common global float 0.000000e+00, align 4
+@de = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @f1() #0 {
+entry:
+ store float 0x3FFA76C8C0000000, float* @f, align 4
+ ret void
+; CHECK: .ent f1
+; CHECK: lui $[[REG1:[0-9]+]], 16339
+; CHECK: ori $[[REG2:[0-9]+]], $[[REG1]], 46662
+; CHECK: mtc1 $[[REG2]], $f[[REG3:[0-9]+]]
+; CHECK: lw $[[REG4:[0-9]+]], %got(f)(${{[0-9]+}})
+; CHECK: swc1 $f[[REG3]], 0($[[REG4]])
+; CHECK: .end f1
+
+}
+
+; Function Attrs: nounwind
+define void @d1() #0 {
+entry:
+ store double 1.234567e+00, double* @de, align 8
+; CHECK: .ent d1
+; CHECK: lui $[[REG1a:[0-9]+]], 16371
+; CHECK: ori $[[REG2a:[0-9]+]], $[[REG1a]], 49353
+; CHECK: lui $[[REG1b:[0-9]+]], 21403
+; CHECK: ori $[[REG2b:[0-9]+]], $[[REG1b]], 34951
+; CHECK: mtc1 $[[REG2b]], $f[[REG3:[0-9]+]]
+; CHECK: mthc1 $[[REG2a]], $f[[REG3]]
+; CHECK: sdc1 $f[[REG3]], 0(${{[0-9]+}})
+; CHECK: .end d1
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/abiflags-xx.ll b/test/CodeGen/Mips/abiflags-xx.ll
new file mode 100644
index 0000000..b8aa071
--- /dev/null
+++ b/test/CodeGen/Mips/abiflags-xx.ll
@@ -0,0 +1,6 @@
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -mattr=fpxx %s -o - | FileCheck %s
+; XFAIL: *
+
+; CHECK: .nan legacy
+; CHECK: .module fp=xx
+
diff --git a/test/CodeGen/Mips/abiflags32.ll b/test/CodeGen/Mips/abiflags32.ll
new file mode 100644
index 0000000..093964f
--- /dev/null
+++ b/test/CodeGen/Mips/abiflags32.ll
@@ -0,0 +1,12 @@
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | FileCheck %s
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -mattr=fp64 %s -o - | FileCheck -check-prefix=CHECK-64 %s
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips64 -mattr=-n64,n32 %s -o - | FileCheck -check-prefix=CHECK-64n %s
+
+; CHECK: .nan legacy
+; CHECK: .module fp=32
+
+; CHECK-64: .nan legacy
+; CHECK-64: .module fp=64
+
+; CHECK-64n: .nan legacy
+; CHECK-64n: .module fp=64
diff --git a/test/CodeGen/Mips/analyzebranch.ll b/test/CodeGen/Mips/analyzebranch.ll
index 8ec5d93..4b5d097 100644
--- a/test/CodeGen/Mips/analyzebranch.ll
+++ b/test/CodeGen/Mips/analyzebranch.ll
@@ -1,9 +1,25 @@
-; RUN: llc -march=mips < %s | FileCheck %s
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR -check-prefix=32-GPR
+; RUN: llc -march=mips64 -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips64 -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips64 -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR -check-prefix=64-GPR
define double @foo(double %a, double %b) nounwind readnone {
entry:
-; CHECK: bc1f $BB
-; CHECK: nop
+; ALL-LABEL: foo:
+
+; FCC: bc1f $BB
+; FCC: nop
+
+; 32-GPR: mtc1 $zero, $[[Z:f[0-9]]]
+; 32-GPR: mthc1 $zero, $[[Z:f[0-9]]]
+; 64-GPR: dmtc1 $zero, $[[Z:f[0-9]]]
+; GPR: cmp.lt.d $[[FGRCC:f[0-9]+]], $[[Z]], $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB
%cmp = fcmp ogt double %a, 0.000000e+00
br i1 %cmp, label %if.end6, label %if.else
@@ -25,8 +41,17 @@ return: ; preds = %if.else, %if.end6
define void @f1(float %f) nounwind {
entry:
-; CHECK: bc1f $BB
-; CHECK: nop
+; ALL-LABEL: f1:
+
+; FCC: bc1f $BB
+; FCC: nop
+
+; GPR: mtc1 $zero, $[[Z:f[0-9]]]
+; GPR: cmp.eq.s $[[FGRCC:f[0-9]+]], $f12, $[[Z]]
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: beqz $[[GPRCC]], $BB
+
%cmp = fcmp une float %f, 0.000000e+00
br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll
index 77d7bf3..066d42c 100644
--- a/test/CodeGen/Mips/atomic.ll
+++ b/test/CodeGen/Mips/atomic.ll
@@ -1,5 +1,14 @@
-; RUN: llc -march=mipsel --disable-machine-licm < %s | FileCheck %s -check-prefix=CHECK-EL
-; RUN: llc -march=mips --disable-machine-licm < %s | FileCheck %s -check-prefix=CHECK-EB
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+
+; Keep one big-endian check so that we don't reduce testing, but don't add more
+; since endianness doesn't affect the body of the atomic operations.
+; RUN: llc -march=mips --disable-machine-licm -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=CHECK-EB
@x = common global i32 0, align 4
@@ -8,21 +17,16 @@ entry:
%0 = atomicrmw add i32* @x, i32 %incr monotonic
ret i32 %0
-; CHECK-EL-LABEL: AtomicLoadAdd32:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK-EL: addu $[[R2:[0-9]+]], $[[R1]], $4
-; CHECK-EL: sc $[[R2]], 0($[[R0]])
-; CHECK-EL: beqz $[[R2]], $[[BB0]]
-
-; CHECK-EB-LABEL: AtomicLoadAdd32:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK-EB: addu $[[R2:[0-9]+]], $[[R1]], $4
-; CHECK-EB: sc $[[R2]], 0($[[R0]])
-; CHECK-EB: beqz $[[R2]], $[[BB0]]
+; ALL-LABEL: AtomicLoadAdd32:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)(
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R1:[0-9]+]], 0($[[R0]])
+; ALL: addu $[[R2:[0-9]+]], $[[R1]], $4
+; ALL: sc $[[R2]], 0($[[R0]])
+; ALL: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicLoadNand32(i32 %incr) nounwind {
@@ -30,23 +34,17 @@ entry:
%0 = atomicrmw nand i32* @x, i32 %incr monotonic
ret i32 %0
-; CHECK-EL-LABEL: AtomicLoadNand32:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK-EL: and $[[R3:[0-9]+]], $[[R1]], $4
-; CHECK-EL: nor $[[R2:[0-9]+]], $zero, $[[R3]]
-; CHECK-EL: sc $[[R2]], 0($[[R0]])
-; CHECK-EL: beqz $[[R2]], $[[BB0]]
-
-; CHECK-EB-LABEL: AtomicLoadNand32:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK-EB: and $[[R3:[0-9]+]], $[[R1]], $4
-; CHECK-EB: nor $[[R2:[0-9]+]], $zero, $[[R3]]
-; CHECK-EB: sc $[[R2]], 0($[[R0]])
-; CHECK-EB: beqz $[[R2]], $[[BB0]]
+; ALL-LABEL: AtomicLoadNand32:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)(
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R1:[0-9]+]], 0($[[R0]])
+; ALL: and $[[R3:[0-9]+]], $[[R1]], $4
+; ALL: nor $[[R2:[0-9]+]], $zero, $[[R3]]
+; ALL: sc $[[R2]], 0($[[R0]])
+; ALL: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicSwap32(i32 %newval) nounwind {
@@ -57,19 +55,15 @@ entry:
%0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
ret i32 %0
-; CHECK-EL-LABEL: AtomicSwap32:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll ${{[0-9]+}}, 0($[[R0]])
-; CHECK-EL: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK-EL: beqz $[[R2]], $[[BB0]]
-
-; CHECK-EB-LABEL: AtomicSwap32:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll ${{[0-9]+}}, 0($[[R0]])
-; CHECK-EB: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK-EB: beqz $[[R2]], $[[BB0]]
+; ALL-LABEL: AtomicSwap32:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll ${{[0-9]+}}, 0($[[R0]])
+; ALL: sc $[[R2:[0-9]+]], 0($[[R0]])
+; ALL: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
@@ -78,25 +72,20 @@ entry:
store i32 %newval, i32* %newval.addr, align 4
%tmp = load i32* %newval.addr, align 4
%0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic monotonic
- ret i32 %0
+ %1 = extractvalue { i32, i1 } %0, 0
+ ret i32 %1
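+; (cmpxchg now returns a { i32, i1 } pair of the loaded value and a success
+; flag, hence the extractvalue of field 0 above; the same pattern recurs in
+; the cmpxchg tests below.)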
+
+; ALL-LABEL: AtomicCmpSwap32:
-; CHECK-EL-LABEL: AtomicCmpSwap32:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $2, 0($[[R0]])
-; CHECK-EL: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
-; CHECK-EL: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK-EL: beqz $[[R2]], $[[BB0]]
-; CHECK-EL: $[[BB1]]:
-
-; CHECK-EB-LABEL: AtomicCmpSwap32:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $2, 0($[[R0]])
-; CHECK-EB: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
-; CHECK-EB: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK-EB: beqz $[[R2]], $[[BB0]]
-; CHECK-EB: $[[BB1]]:
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)(
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $2, 0($[[R0]])
+; ALL: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
+; ALL: sc $[[R2:[0-9]+]], 0($[[R0]])
+; ALL: beqz $[[R2]], $[[BB0]]
+; ALL: $[[BB1]]:
}
@@ -108,56 +97,38 @@ entry:
%0 = atomicrmw add i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK-EL-LABEL: AtomicLoadAdd8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EL: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EL: sc $[[R14]], 0($[[R2]])
-; CHECK-EL: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicLoadAdd8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EB: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
-; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
-; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EB: sc $[[R14]], 0($[[R2]])
-; CHECK-EB: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
-; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EB: sra $2, $[[R17]], 24
+; ALL-LABEL: AtomicLoadAdd8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; ALL: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 24
+; NO-SEB-SEH: sra $2, $[[R17]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R16]]
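+;
+; (The block above implements a byte-wide atomic RMW using word-wide ll/sc:
+; align the pointer down to a word, build a shifted 0xff mask, apply the
+; operation to the shifted operand, merge the result back into the word,
+; then extract and sign-extend the byte.)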
}
define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
@@ -165,56 +136,38 @@ entry:
%0 = atomicrmw sub i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK-EL-LABEL: AtomicLoadSub8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EL: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EL: sc $[[R14]], 0($[[R2]])
-; CHECK-EL: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicLoadSub8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
+; ALL-LABEL: AtomicLoadSub8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EB: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
-; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
-; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EB: sc $[[R14]], 0($[[R2]])
-; CHECK-EB: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
-; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EB: sra $2, $[[R17]], 24
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; ALL: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 24
+; NO-SEB-SEH: sra $2, $[[R17]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R16]]
}
define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
@@ -222,58 +175,39 @@ entry:
%0 = atomicrmw nand i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK-EL-LABEL: AtomicLoadNand8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EL: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EL: nor $[[R11:[0-9]+]], $zero, $[[R18]]
-; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EL: sc $[[R14]], 0($[[R2]])
-; CHECK-EL: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicLoadNand8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EB: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EB: nor $[[R11:[0-9]+]], $zero, $[[R18]]
-; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
-; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
-; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EB: sc $[[R14]], 0($[[R2]])
-; CHECK-EB: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
-; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EB: sra $2, $[[R17]], 24
+; ALL-LABEL: AtomicLoadNand8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
+; ALL: nor $[[R11:[0-9]+]], $zero, $[[R18]]
+; ALL: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 24
+; NO-SEB-SEH: sra $2, $[[R17]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R16]]
}
define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
@@ -281,121 +215,126 @@ entry:
%0 = atomicrmw xchg i8* @y, i8 %newval monotonic
ret i8 %0
-; CHECK-EL-LABEL: AtomicSwap8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EL: and $[[R18:[0-9]+]], $[[R9]], $[[R6]]
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
-; CHECK-EL: sc $[[R14]], 0($[[R2]])
-; CHECK-EL: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicSwap8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EB: and $[[R18:[0-9]+]], $[[R9]], $[[R7]]
-; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
-; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
-; CHECK-EB: sc $[[R14]], 0($[[R2]])
-; CHECK-EB: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
-; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EB: sra $2, $[[R17]], 24
+; ALL-LABEL: AtomicSwap8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: and $[[R18:[0-9]+]], $[[R9]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 24
+; NO-SEB-SEH: sra $2, $[[R17]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R16]]
}
define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
entry:
- %0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
+ %pair0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
+ %0 = extractvalue { i8, i1 } %pair0, 0
ret i8 %0
-; CHECK-EL-LABEL: AtomicCmpSwap8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: andi $[[R8:[0-9]+]], $4, 255
-; CHECK-EL: sllv $[[R9:[0-9]+]], $[[R8]], $[[R4]]
-; CHECK-EL: andi $[[R10:[0-9]+]], $5, 255
-; CHECK-EL: sllv $[[R11:[0-9]+]], $[[R10]], $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R12:[0-9]+]], 0($[[R2]])
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R12]], $[[R6]]
-; CHECK-EL: bne $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]
-
-; CHECK-EL: and $[[R14:[0-9]+]], $[[R12]], $[[R7]]
-; CHECK-EL: or $[[R15:[0-9]+]], $[[R14]], $[[R11]]
-; CHECK-EL: sc $[[R15]], 0($[[R2]])
-; CHECK-EL: beqz $[[R15]], $[[BB0]]
-
-; CHECK-EL: $[[BB1]]:
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R13]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicCmpSwap8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: andi $[[R9:[0-9]+]], $4, 255
-; CHECK-EB: sllv $[[R10:[0-9]+]], $[[R9]], $[[R5]]
-; CHECK-EB: andi $[[R11:[0-9]+]], $5, 255
-; CHECK-EB: sllv $[[R12:[0-9]+]], $[[R11]], $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R13:[0-9]+]], 0($[[R2]])
-; CHECK-EB: and $[[R14:[0-9]+]], $[[R13]], $[[R7]]
-; CHECK-EB: bne $[[R14]], $[[R10]], $[[BB1:[A-Z_0-9]+]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R13]], $[[R8]]
-; CHECK-EB: or $[[R16:[0-9]+]], $[[R15]], $[[R12]]
-; CHECK-EB: sc $[[R16]], 0($[[R2]])
-; CHECK-EB: beqz $[[R16]], $[[BB0]]
-
-; CHECK-EB: $[[BB1]]:
-; CHECK-EB: srlv $[[R17:[0-9]+]], $[[R14]], $[[R5]]
-; CHECK-EB: sll $[[R18:[0-9]+]], $[[R17]], 24
-; CHECK-EB: sra $2, $[[R18]], 24
+; ALL-LABEL: AtomicCmpSwap8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: andi $[[R9:[0-9]+]], $4, 255
+; ALL: sllv $[[R10:[0-9]+]], $[[R9]], $[[R5]]
+; ALL: andi $[[R11:[0-9]+]], $5, 255
+; ALL: sllv $[[R12:[0-9]+]], $[[R11]], $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R13:[0-9]+]], 0($[[R2]])
+; ALL: and $[[R14:[0-9]+]], $[[R13]], $[[R7]]
+; ALL: bne $[[R14]], $[[R10]], $[[BB1:[A-Z_0-9]+]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R13]], $[[R8]]
+; ALL: or $[[R16:[0-9]+]], $[[R15]], $[[R12]]
+; ALL: sc $[[R16]], 0($[[R2]])
+; ALL: beqz $[[R16]], $[[BB0]]
+
+; ALL: $[[BB1]]:
+; ALL: srlv $[[R17:[0-9]+]], $[[R14]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R18:[0-9]+]], $[[R17]], 24
+; NO-SEB-SEH: sra $2, $[[R18]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R17]]
+}
+
+; Check one i16 case so that we also cover the seh sign-extension.
+@z = common global i16 0, align 1
+
+define signext i16 @AtomicLoadAdd16(i16 signext %incr) nounwind {
+entry:
+ %0 = atomicrmw add i16* @z, i16 %incr monotonic
+ ret i16 %0
+
+; ALL-LABEL: AtomicLoadAdd16:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(z)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(z)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 2
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 65535
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; ALL: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 16
+; NO-SEB-SEH: sra $2, $[[R17]], 16
+
+; HAS-SEB-SEH: seh $2, $[[R16]]
}
+
@countsint = common global i32 0, align 4
define i32 @CheckSync(i32 %v) nounwind noinline {
@@ -403,19 +342,13 @@ entry:
%0 = atomicrmw add i32* @countsint, i32 %v seq_cst
ret i32 %0
-; CHECK-EL-LABEL: CheckSync:
-; CHECK-EL: sync 0
-; CHECK-EL: ll
-; CHECK-EL: sc
-; CHECK-EL: beq
-; CHECK-EL: sync 0
-
-; CHECK-EB-LABEL: CheckSync:
-; CHECK-EB: sync 0
-; CHECK-EB: ll
-; CHECK-EB: sc
-; CHECK-EB: beq
-; CHECK-EB: sync 0
+; ALL-LABEL: CheckSync:
+
+; ALL: sync
+; ALL: ll
+; ALL: sc
+; ALL: beq
+; ALL: sync
}
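+
+; Note: for seq_cst the expansion brackets the ll/sc retry loop with sync
+; barriers, one before the loop and one after it, which is what the two
+; sync checks above pin down.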
; make sure that this assertion in
@@ -429,8 +362,29 @@ entry:
define i32 @zeroreg() nounwind {
entry:
- %0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst seq_cst
+ %pair0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst seq_cst
+ %0 = extractvalue { i32, i1 } %pair0, 0
%1 = icmp eq i32 %0, 1
%conv = zext i1 %1 to i32
ret i32 %conv
}
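+
+; Note: cmpxchg now returns a { i32, i1 } pair (the loaded value and a
+; success flag), so callers extract the element they need, e.g.:
+;
+;   %pair    = cmpxchg i32* @a, i32 1, i32 0 seq_cst seq_cst
+;   %loaded  = extractvalue { i32, i1 } %pair, 0
+;   %success = extractvalue { i32, i1 } %pair, 1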
+
+; Check that MIPS32R6 has the correct offset range.
+; FIXME: At the moment, we don't seem to do addr+offset for any atomic load/store.
+define i32 @AtomicLoadAdd32_OffGt9Bit(i32 %incr) nounwind {
+entry:
+ %0 = atomicrmw add i32* getelementptr(i32* @x, i32 256), i32 %incr monotonic
+ ret i32 %0
+
+; ALL-LABEL: AtomicLoadAdd32_OffGt9Bit:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)(
+
+; ALL: addiu $[[PTR:[0-9]+]], $[[R0]], 1024
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R1:[0-9]+]], 0($[[PTR]])
+; ALL: addu $[[R2:[0-9]+]], $[[R1]], $4
+; ALL: sc $[[R2]], 0($[[PTR]])
+; ALL: beqz $[[R2]], $[[BB0]]
+}
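+
+; Note: the element offset of 256 scales to a byte offset of 256 * 4 = 1024,
+; which is outside the 9-bit signed offset range of the MIPS32r6 ll/sc, so
+; the address has to be folded into the base register with addiu first.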
diff --git a/test/CodeGen/Mips/atomicops.ll b/test/CodeGen/Mips/atomicops.ll
index dc07c63..c264152 100644
--- a/test/CodeGen/Mips/atomicops.ll
+++ b/test/CodeGen/Mips/atomicops.ll
@@ -20,7 +20,8 @@ entry:
%add.i = add nsw i32 %0, 2
%1 = load volatile i32* %x, align 4
%call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
- %2 = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
+ %pair = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
+ %2 = extractvalue { i32, i1 } %pair, 0
%3 = load volatile i32* %x, align 4
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
%4 = atomicrmw xchg i32* %x, i32 1 seq_cst
diff --git a/test/CodeGen/Mips/buildpairextractelementf64.ll b/test/CodeGen/Mips/buildpairextractelementf64.ll
index b9bf2b6..88d1d07 100644
--- a/test/CodeGen/Mips/buildpairextractelementf64.ll
+++ b/test/CodeGen/Mips/buildpairextractelementf64.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=FP32 -check-prefix=CHECK
; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=FP32 -check-prefix=CHECK
-; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64 -check-prefix=CHECK
-; RUN: llc -march=mips -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64 -check-prefix=CHECK
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64 -check-prefix=CHECK
+; RUN: llc -march=mips -mcpu=mips32r2 -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64 -check-prefix=CHECK
@a = external global i32
diff --git a/test/CodeGen/Mips/cconv/callee-saved-fpxx.ll b/test/CodeGen/Mips/cconv/callee-saved-fpxx.ll
new file mode 100644
index 0000000..4b28b99
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/callee-saved-fpxx.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=mips -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX %s
+; RUN: llc -march=mipsel -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX %s
+; RUN: llc -march=mips -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX-INV %s
+; RUN: llc -march=mipsel -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX-INV %s
+
+; RUN-TODO: llc -march=mips64 -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX %s
+; RUN-TODO: llc -march=mips64el -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX %s
+; RUN-TODO: llc -march=mips64 -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX-INV %s
+; RUN-TODO: llc -march=mips64el -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX-INV %s
+
+define void @fpu_clobber() nounwind {
+entry:
+ call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f13},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ ret void
+}
+
+; O32-FPXX-LABEL: fpu_clobber:
+; O32-FPXX-INV-NOT: sdc1 $f0,
+; O32-FPXX-INV-NOT: sdc1 $f1,
+; O32-FPXX-INV-NOT: sdc1 $f2,
+; O32-FPXX-INV-NOT: sdc1 $f3,
+; O32-FPXX-INV-NOT: sdc1 $f4,
+; O32-FPXX-INV-NOT: sdc1 $f5,
+; O32-FPXX-INV-NOT: sdc1 $f6,
+; O32-FPXX-INV-NOT: sdc1 $f7,
+; O32-FPXX-INV-NOT: sdc1 $f8,
+; O32-FPXX-INV-NOT: sdc1 $f9,
+; O32-FPXX-INV-NOT: sdc1 $f10,
+; O32-FPXX-INV-NOT: sdc1 $f11,
+; O32-FPXX-INV-NOT: sdc1 $f12,
+; O32-FPXX-INV-NOT: sdc1 $f13,
+; O32-FPXX-INV-NOT: sdc1 $f14,
+; O32-FPXX-INV-NOT: sdc1 $f15,
+; O32-FPXX-INV-NOT: sdc1 $f16,
+; O32-FPXX-INV-NOT: sdc1 $f17,
+; O32-FPXX-INV-NOT: sdc1 $f18,
+; O32-FPXX-INV-NOT: sdc1 $f19,
+; O32-FPXX-INV-NOT: sdc1 $f21,
+; O32-FPXX-INV-NOT: sdc1 $f23,
+; O32-FPXX-INV-NOT: sdc1 $f25,
+; O32-FPXX-INV-NOT: sdc1 $f27,
+; O32-FPXX-INV-NOT: sdc1 $f29,
+; O32-FPXX-INV-NOT: sdc1 $f31,
+
+; O32-FPXX: addiu $sp, $sp, -48
+; O32-FPXX-DAG: sdc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F22:\$f22]], [[OFF22:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp)
+; O32-FPXX-DAG: ldc1 [[F20]], [[OFF20]]($sp)
+; O32-FPXX-DAG: ldc1 [[F22]], [[OFF22]]($sp)
+; O32-FPXX-DAG: ldc1 [[F24]], [[OFF24]]($sp)
+; O32-FPXX-DAG: ldc1 [[F26]], [[OFF26]]($sp)
+; O32-FPXX-DAG: ldc1 [[F28]], [[OFF28]]($sp)
+; O32-FPXX-DAG: ldc1 [[F30]], [[OFF30]]($sp)
+; O32-FPXX: addiu $sp, $sp, 48
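+
+; In other words, under O32 FPXX the callee saves the even-numbered registers
+; $f20-$f30 as 64-bit values with sdc1/ldc1, and never spills an odd-numbered
+; register on its own; that is exactly what the -INV prefixes rule out above.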
diff --git a/test/CodeGen/Mips/cconv/callee-saved-fpxx1.ll b/test/CodeGen/Mips/cconv/callee-saved-fpxx1.ll
new file mode 100644
index 0000000..489879e
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/callee-saved-fpxx1.ll
@@ -0,0 +1,24 @@
+; RUN: llc -march=mips -mattr=+o32,+fp64 < %s | FileCheck --check-prefix=O32-FP64-INV %s
+; RUN: llc -march=mipsel -mattr=+o32,+fp64 < %s | FileCheck --check-prefix=O32-FP64-INV %s
+
+; RUN: llc -march=mips -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=O32-FPXX %s
+; RUN: llc -march=mipsel -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=O32-FPXX %s
+
+; RUN-TODO: llc -march=mips64 -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=O32-FPXX %s
+; RUN-TODO: llc -march=mips64el -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=O32-FPXX %s
+
+define void @fpu_clobber() nounwind {
+entry:
+ call void asm "# Clobber", "~{$f21}"()
+ ret void
+}
+
+; O32-FPXX-LABEL: fpu_clobber:
+
+; O32-FPXX: addiu $sp, $sp, -8
+
+; O32-FP64-INV-NOT: sdc1 $f20,
+; O32-FPXX-DAG: sdc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp)
+; O32-FPXX-DAG: ldc1 [[F20]], [[OFF20]]($sp)
+
+; O32-FPXX: addiu $sp, $sp, 8
diff --git a/test/CodeGen/Mips/cmov.ll b/test/CodeGen/Mips/cmov.ll
index b9732eb..999bdb4 100644
--- a/test/CodeGen/Mips/cmov.ll
+++ b/test/CodeGen/Mips/cmov.ll
@@ -1,17 +1,43 @@
-; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=O32
-; RUN: llc -march=mips -regalloc=basic < %s | FileCheck %s -check-prefix=O32
-; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | FileCheck %s -check-prefix=N64
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck %s -check-prefix=N64
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc -march=mips -mcpu=mips32 -regalloc=basic < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-CMP
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc -march=mips64el -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-CMP
@i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
@i3 = common global i32* null, align 4
-; O32-DAG: lw $[[R0:[0-9]+]], %got(i3)
-; O32-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(i1)
-; O32: movn $[[R0]], $[[R1]], ${{[0-9]+}}
-; N64-DAG: ldr $[[R0:[0-9]+]]
-; N64-DAG: ld $[[R1:[0-9]+]], %got_disp(i1)
-; N64: movn $[[R0]], $[[R1]], ${{[0-9]+}}
+; ALL-LABEL: cmov1:
+
+; 32-CMOV-DAG: lw $[[R0:[0-9]+]], %got(i3)
+; 32-CMOV-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(i1)
+; 32-CMOV-DAG: movn $[[R0]], $[[R1]], $4
+; 32-CMOV-DAG: lw $2, 0($[[R0]])
+
+; 32-CMP-DAG: lw $[[R0:[0-9]+]], %got(i3)
+; 32-CMP-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(i1)
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[R1]], $4
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[R0]], $4
+; 32-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+; 32-CMP-DAG: lw $2, 0($[[T2]])
+
+; 64-CMOV-DAG: ldr $[[R0:[0-9]+]]
+; 64-CMOV-DAG: ld $[[R1:[0-9]+]], %got_disp(i1)
+; 64-CMOV-DAG: movn $[[R0]], $[[R1]], $4
+
+; 64-CMP-DAG: ld $[[R0:[0-9]+]], %got_disp(i3)(
+; 64-CMP-DAG: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(i1)
+; FIXME: This sll works around an implementation detail in the code generator
+; (setcc's result is i32 so bits 32-63 are undefined). It's not really
+; needed.
+; 64-CMP-DAG: sll $[[CC:[0-9]+]], $4, 0
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[R1]], $[[CC]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[R0]], $[[CC]]
+; 64-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+; 64-CMP-DAG: ld $2, 0($[[T2]])
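+; (On MIPS64, sll with a shift amount of 0 sign-extends the 32-bit value,
+; which is what makes bits 32-63 well-defined here.)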
+
define i32* @cmov1(i32 %s) nounwind readonly {
entry:
%tobool = icmp ne i32 %s, 0
@@ -23,14 +49,35 @@ entry:
@c = global i32 1, align 4
@d = global i32 0, align 4
-; O32-LABEL: cmov2:
-; O32: addiu $[[R1:[0-9]+]], ${{[a-z0-9]+}}, %got(d)
-; O32: addiu $[[R0:[0-9]+]], ${{[a-z0-9]+}}, %got(c)
-; O32: movn $[[R1]], $[[R0]], ${{[0-9]+}}
-; N64-LABEL: cmov2:
-; N64: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(d)
-; N64: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got_disp(c)
-; N64: movn $[[R1]], $[[R0]], ${{[0-9]+}}
+; ALL-LABEL: cmov2:
+
+; 32-CMOV-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(d)
+; 32-CMOV-DAG: addiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got(c)
+; 32-CMOV-DAG: movn $[[R1]], $[[R0]], $4
+; 32-CMOV-DAG: lw $2, 0($[[R0]])
+
+; 32-CMP-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(d)
+; 32-CMP-DAG: addiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got(c)
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[R0]], $4
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[R1]], $4
+; 32-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+; 32-CMP-DAG: lw $2, 0($[[T2]])
+
+; 64-CMOV: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(d)
+; 64-CMOV: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got_disp(c)
+; 64-CMOV: movn $[[R1]], $[[R0]], $4
+
+; 64-CMP-DAG: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(d)
+; 64-CMP-DAG: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got_disp(c)
+; FIXME: This sll works around an implementation detail in the code generator
+; (setcc's result is i32 so bits 32-63 are undefined). It's not really
+; needed.
+; 64-CMP-DAG: sll $[[CC:[0-9]+]], $4, 0
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[R0]], $[[CC]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[R1]], $[[CC]]
+; 64-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+; 64-CMP-DAG: lw $2, 0($[[T2]])
+
define i32 @cmov2(i32 %s) nounwind readonly {
entry:
%tobool = icmp ne i32 %s, 0
@@ -40,9 +87,28 @@ entry:
ret i32 %cond
}
-; O32-LABEL: cmov3:
-; O32: xori $[[R0:[0-9]+]], ${{[0-9]+}}, 234
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: cmov3:
+
+; We won't check the result register since we can't know if the move is first
+; or last. We do know it will be one of two registers, so we can at least
+; check that.
+
+; 32-CMOV: xori $[[R0:[0-9]+]], $4, 234
+; 32-CMOV: movz ${{[26]}}, $5, $[[R0]]
+
+; 32-CMP-DAG: xori $[[CC:[0-9]+]], $4, 234
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $5, $[[CC]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $6, $[[CC]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV: xori $[[R0:[0-9]+]], $4, 234
+; 64-CMOV: movz ${{[26]}}, $5, $[[R0]]
+
+; 64-CMP-DAG: xori $[[CC:[0-9]+]], $4, 234
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $5, $[[CC]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $6, $[[CC]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
define i32 @cmov3(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%cmp = icmp eq i32 %a, 234
@@ -50,9 +116,36 @@ entry:
ret i32 %cond
}
-; N64-LABEL: cmov4:
-; N64: xori $[[R0:[0-9]+]], ${{[0-9]+}}, 234
-; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: cmov4:
+
+; We won't check the result register since we can't know if the move is first
+; or last. We do know it will be one of two registers so we can at least check
+; that.
+
+; 32-CMOV-DAG: xori $[[R0:[0-9]+]], $4, 234
+; 32-CMOV-DAG: lw $[[R1:2]], 16($sp)
+; 32-CMOV-DAG: lw $[[R2:3]], 20($sp)
+; 32-CMOV-DAG: movz $[[R1]], $6, $[[R0]]
+; 32-CMOV-DAG: movz $[[R2]], $7, $[[R0]]
+
+; 32-CMP-DAG: xori $[[R0:[0-9]+]], $4, 234
+; 32-CMP-DAG: lw $[[R1:[0-9]+]], 16($sp)
+; 32-CMP-DAG: lw $[[R2:[0-9]+]], 20($sp)
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $6, $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $7, $[[R0]]
+; 32-CMP-DAG: selnez $[[T2:[0-9]+]], $[[R1]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T3:[0-9]+]], $[[R2]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T2]]
+; 32-CMP-DAG: or $3, $[[T1]], $[[T3]]
+
+; 64-CMOV: xori $[[R0:[0-9]+]], $4, 234
+; 64-CMOV: movz ${{[26]}}, $5, $[[R0]]
+
+; 64-CMP-DAG: xori $[[R0:[0-9]+]], $4, 234
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $5, $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $6, $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
define i64 @cmov4(i32 %a, i64 %b, i64 %c) nounwind readnone {
entry:
%cmp = icmp eq i32 %a, 234
@@ -68,9 +161,33 @@ entry:
; (movz t, (setlt a, N + 1), f)
; if N + 1 fits in 16-bit.
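+; For example, slti0 below selects on (setgt $a, 32766); this can be emitted
+; as slti with the immediate 32767 and the select operands swapped, because
+; 32767 still fits in the 16-bit signed immediate field.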
-; O32-LABEL: slti0:
-; O32: slti $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: slti0:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; 32-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; 64-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @slti0(i32 %a) {
entry:
@@ -79,19 +196,72 @@ entry:
ret i32 %cond
}
-; O32-LABEL: slti1:
-; O32: slt ${{[0-9]+}}
+; ALL-LABEL: slti1:
+
+; 32-CMOV-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 32-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: movn $[[I5]], $[[I7]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: addiu $[[I32767:[0-9]+]], $zero, 32767
+; 32-CMP-DAG: slt $[[R0:[0-9]+]], $[[I32767]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I7]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I5]], $[[I7]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 64-CMP-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMP-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMP-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I7]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @slti1(i32 %a) {
entry:
%cmp = icmp sgt i32 %a, 32767
- %cond = select i1 %cmp, i32 3, i32 5
+ %cond = select i1 %cmp, i32 7, i32 5
ret i32 %cond
}
-; O32-LABEL: slti2:
-; O32: slti $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: slti2:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; 32-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; 64-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @slti2(i32 %a) {
entry:
@@ -100,8 +270,41 @@ entry:
ret i32 %cond
}
-; O32-LABEL: slti3:
-; O32: slt ${{[0-9]+}}
+; ALL-LABEL: slti3:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: lui $[[R1:[0-9]+]], 65535
+; 32-CMOV-DAG: ori $[[R1]], $[[R1]], 32766
+; 32-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: movn $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: lui $[[IMM:[0-9]+]], 65535
+; 32-CMP-DAG: ori $[[IMM]], $[[IMM]], 32766
+; 32-CMP-DAG: slt $[[R0:[0-9]+]], $[[IMM]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: lui $[[R1:[0-9]+]], 65535
+; 64-CMOV-DAG: ori $[[R1]], $[[R1]], 32766
+; 64-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMP-DAG: lui $[[IMM:[0-9]+]], 65535
+; 64-CMP-DAG: ori $[[IMM]], $[[IMM]], 32766
+; 64-CMP-DAG: slt $[[R0:[0-9]+]], $[[IMM]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @slti3(i32 %a) {
entry:
@@ -112,30 +315,117 @@ entry:
; 64-bit patterns.
-; N64-LABEL: slti64_0:
-; N64: slti $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
-; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: slti64_0:
+
+; 32-CMOV-DAG: slt $[[CC:[0-9]+]], $zero, $4
+; 32-CMOV-DAG: addiu $[[I32766:[0-9]+]], $zero, 32766
+; 32-CMOV-DAG: sltu $[[R1:[0-9]+]], $[[I32766]], $5
+; 32-CMOV-DAG: movz $[[CC:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMOV-DAG: addiu $[[I4:3]], $zero, 4
+; 32-CMOV-DAG: movn $[[I4]], $[[I5]], $[[CC]]
+; 32-CMOV-DAG: addiu $2, $zero, 0
+
+; 32-CMP-DAG: slt $[[CC0:[0-9]+]], $zero, $4
+; 32-CMP-DAG: addiu $[[I32766:[0-9]+]], $zero, 32766
+; 32-CMP-DAG: sltu $[[CC1:[0-9]+]], $[[I32766]], $5
+; 32-CMP-DAG: selnez $[[CC2:[0-9]+]], $[[CC0]], $4
+; 32-CMP-DAG: seleqz $[[CC3:[0-9]+]], $[[CC1]], $4
+; 32-CMP: or $[[CC:[0-9]+]], $[[CC3]], $[[CC2]]
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: addiu $[[I4:[0-9]+]], $zero, 4
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I4]], $[[CC]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[CC]]
+; 32-CMP-DAG: or $3, $[[T1]], $[[T0]]
+; 32-CMP-DAG: addiu $2, $zero, 0
+
+; 64-CMOV-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMOV-DAG: addiu $[[I4:2]], $zero, 4
+; 64-CMOV-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; 64-CMOV-DAG: movz $[[I4]], $[[I5]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: addiu $[[I4:[0-9]+]], $zero, 4
+; 64-CMP-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by adding/subtracting the result of slti
+; to/from one of the constants.
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I4]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i64 @slti64_0(i64 %a) {
entry:
%cmp = icmp sgt i64 %a, 32766
- %conv = select i1 %cmp, i64 3, i64 4
+ %conv = select i1 %cmp, i64 5, i64 4
ret i64 %conv
}
-; N64-LABEL: slti64_1:
-; N64: slt ${{[0-9]+}}
+; ALL-LABEL: slti64_1:
+
+; 32-CMOV-DAG: slt $[[CC:[0-9]+]], $zero, $4
+; 32-CMOV-DAG: addiu $[[I32767:[0-9]+]], $zero, 32767
+; 32-CMOV-DAG: sltu $[[R1:[0-9]+]], $[[I32767]], $5
+; 32-CMOV-DAG: movz $[[CC:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMOV-DAG: addiu $[[I4:3]], $zero, 4
+; 32-CMOV-DAG: movn $[[I4]], $[[I5]], $[[CC]]
+; 32-CMOV-DAG: addiu $2, $zero, 0
+
+; 32-CMP-DAG: slt $[[CC0:[0-9]+]], $zero, $4
+; 32-CMP-DAG: addiu $[[I32767:[0-9]+]], $zero, 32767
+; 32-CMP-DAG: sltu $[[CC1:[0-9]+]], $[[I32767]], $5
+; 32-CMP-DAG: selnez $[[CC2:[0-9]+]], $[[CC0]], $4
+; 32-CMP-DAG: seleqz $[[CC3:[0-9]+]], $[[CC1]], $4
+; 32-CMP: or $[[CC:[0-9]+]], $[[CC3]], $[[CC2]]
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: addiu $[[I4:[0-9]+]], $zero, 4
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I4]], $[[CC]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[CC]]
+; 32-CMP-DAG: or $3, $[[T1]], $[[T0]]
+; 32-CMP-DAG: addiu $2, $zero, 0
+
+; 64-CMOV-DAG: daddiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMOV-DAG: daddiu $[[I4:2]], $zero, 4
+; 64-CMOV-DAG: daddiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I4]], $[[I5]], $[[R0]]
+
+; 64-CMP-DAG: daddiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: daddiu $[[I4:2]], $zero, 4
+; 64-CMP-DAG: daddiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMP-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I4]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i64 @slti64_1(i64 %a) {
entry:
%cmp = icmp sgt i64 %a, 32767
- %conv = select i1 %cmp, i64 3, i64 4
+ %conv = select i1 %cmp, i64 5, i64 4
ret i64 %conv
}
-; N64-LABEL: slti64_2:
-; N64: slti $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
-; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: slti64_2:
+
+; FIXME: The 32-bit versions of this test are too complicated to reasonably
+; match at the moment. They do show some missing optimizations though
+; such as:
+; (movz $a, $b, (neg $c)) -> (movn $a, $b, $c)
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I4:2]], $zero, 4
+; 64-CMOV-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; 64-CMOV-DAG: movz $[[I4]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I4:[0-9]+]], $zero, 4
+; 64-CMP-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by adding/subtracting the result of slti
+; to/from one of the constants.
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I4]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i64 @slti64_2(i64 %a) {
entry:
@@ -144,21 +434,64 @@ entry:
ret i64 %conv
}
-; N64-LABEL: slti64_3:
-; N64: slt ${{[0-9]+}}
+; ALL-LABEL: slti64_3:
+
+; FIXME: The 32-bit versions of this test are too complicated to reasonably
+; match at the moment. They do show some missing optimizations though
+; such as:
+; (movz $a, $b, (neg $c)) -> (movn $a, $b, $c)
+
+; 64-CMOV-DAG: daddiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMOV-DAG: daddiu $[[I4:2]], $zero, 4
+; 64-CMOV-DAG: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, 32766
+; 64-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I4]], $[[I5]], $[[R0]]
+
+; 64-CMP-DAG: daddiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: daddiu $[[I4:2]], $zero, 4
+; 64-CMP-DAG: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, 32766
+; 64-CMP-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I4]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i64 @slti64_3(i64 %a) {
entry:
%cmp = icmp sgt i64 %a, -32770
- %conv = select i1 %cmp, i64 3, i64 4
+ %conv = select i1 %cmp, i64 5, i64 4
ret i64 %conv
}
; sltiu instructions.
-; O32-LABEL: sltiu0:
-; O32: sltiu $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: sltiu0:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: sltiu $[[R0:[0-9]+]], $4, 32767
+; 32-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: sltiu $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: sltiu $[[R0:[0-9]+]], $4, 32767
+; 64-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: sltiu $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @sltiu0(i32 %a) {
entry:
@@ -167,19 +500,72 @@ entry:
ret i32 %cond
}
-; O32-LABEL: sltiu1:
-; O32: sltu ${{[0-9]+}}
+; ALL-LABEL: sltiu1:
+
+; 32-CMOV-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 32-CMOV-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: movn $[[I5]], $[[I7]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: addiu $[[I32767:[0-9]+]], $zero, 32767
+; 32-CMP-DAG: sltu $[[R0:[0-9]+]], $[[I32767]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I7]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMOV-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I5]], $[[I7]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 64-CMP-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMP-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMP-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I7]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @sltiu1(i32 %a) {
entry:
%cmp = icmp ugt i32 %a, 32767
- %cond = select i1 %cmp, i32 3, i32 5
+ %cond = select i1 %cmp, i32 7, i32 5
ret i32 %cond
}
-; O32-LABEL: sltiu2:
-; O32: sltiu $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: sltiu2:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: sltiu $[[R0:[0-9]+]], $4, -32768
+; 32-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: sltiu $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: sltiu $[[R0:[0-9]+]], $4, -32768
+; 64-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: sltiu $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @sltiu2(i32 %a) {
entry:
@@ -188,8 +574,41 @@ entry:
ret i32 %cond
}
-; O32-LABEL: sltiu3:
-; O32: sltu ${{[0-9]+}}
+; ALL-LABEL: sltiu3:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: lui $[[R1:[0-9]+]], 65535
+; 32-CMOV-DAG: ori $[[R1]], $[[R1]], 32766
+; 32-CMOV-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: movn $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: lui $[[IMM:[0-9]+]], 65535
+; 32-CMP-DAG: ori $[[IMM]], $[[IMM]], 32766
+; 32-CMP-DAG: sltu $[[R0:[0-9]+]], $[[IMM]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: lui $[[R1:[0-9]+]], 65535
+; 64-CMOV-DAG: ori $[[R1]], $[[R1]], 32766
+; 64-CMOV-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMP-DAG: lui $[[IMM:[0-9]+]], 65535
+; 64-CMP-DAG: ori $[[IMM]], $[[IMM]], 32766
+; 64-CMP-DAG: sltu $[[R0:[0-9]+]], $[[IMM]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @sltiu3(i32 %a) {
entry:
@@ -210,11 +629,25 @@ define i32 @slti4(i32 %a) nounwind readnone {
ret i32 %2
}
-; O32-LABEL: slti4:
-; O32-DAG: slti [[R1:\$[0-9]+]], $4, 7
-; O32-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
-; O32-NOT: movn
-; O32:.size slti4
+; ALL-LABEL: slti4:
+
+; 32-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMOV-DAG: addiu $2, [[R1]], 3
+; 32-CMOV-NOT: movn
+
+; 32-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMP-DAG: addiu $2, [[R1]], 3
+; 32-CMP-NOT: seleqz
+; 32-CMP-NOT: selnez
+
+; 64-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMOV-DAG: addiu $2, [[R1]], 3
+; 64-CMOV-NOT: movn
+
+; 64-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMP-DAG: addiu $2, [[R1]], 3
+; 64-CMP-NOT: seleqz
+; 64-CMP-NOT: selnez
define i32 @slti5(i32 %a) nounwind readnone {
%1 = icmp slt i32 %a, 7
@@ -222,11 +655,25 @@ define i32 @slti5(i32 %a) nounwind readnone {
ret i32 %2
}
-; O32-LABEL: slti5:
-; O32-DAG: slti [[R1:\$[0-9]+]], $4, 7
-; O32-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
-; O32-NOT: movn
-; O32:.size slti5
+; ALL-LABEL: slti5:
+
+; 32-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMOV-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
+; 32-CMOV-NOT: movn
+
+; 32-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMP-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
+; 32-CMP-NOT: seleqz
+; 32-CMP-NOT: selnez
+
+; 64-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMOV-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
+; 64-CMOV-NOT: movn
+
+; 64-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMP-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
+; 64-CMP-NOT: seleqz
+; 64-CMP-NOT: selnez
define i32 @slti6(i32 %a) nounwind readnone {
%1 = icmp slt i32 %a, 7
@@ -234,9 +681,26 @@ define i32 @slti6(i32 %a) nounwind readnone {
ret i32 %2
}
-; O32-LABEL: slti6:
-; O32-DAG: slti [[R1:\$[0-9]+]], $4, 7
-; O32-DAG: xori [[R1]], [[R1]], 1
-; O32-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
-; O32-NOT: movn
-; O32:.size slti6
+; ALL-LABEL: slti6:
+
+; 32-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMOV-DAG: xori [[R1]], [[R1]], 1
+; 32-CMOV-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
+; 32-CMOV-NOT: movn
+
+; 32-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMP-DAG: xori [[R1]], [[R1]], 1
+; 32-CMP-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
+; 32-CMP-NOT: seleqz
+; 32-CMP-NOT: selnez
+
+; 64-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMOV-DAG: xori [[R1]], [[R1]], 1
+; 64-CMOV-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
+; 64-CMOV-NOT: movn
+
+; 64-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMP-DAG: xori [[R1]], [[R1]], 1
+; 64-CMP-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
+; 64-CMP-NOT: seleqz
+; 64-CMP-NOT: selnez
diff --git a/test/CodeGen/Mips/countleading.ll b/test/CodeGen/Mips/countleading.ll
new file mode 100644
index 0000000..6e63cff
--- /dev/null
+++ b/test/CodeGen/Mips/countleading.ll
@@ -0,0 +1,90 @@
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32-R1-R2 -check-prefix=MIPS32-GT-R1 %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32-R1-R2 -check-prefix=MIPS32-GT-R1 %s
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32-R6 -check-prefix=MIPS32-GT-R1 %s
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS4 %s
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64-GT-R1 %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64-GT-R1 %s
+; RUN-TODO: llc -march=mips64el -mcpu=mips64r6 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64-GT-R1 %s
+
+; Prefixes:
+; ALL - All
+; MIPS32-GT-R1 - MIPS32r1 and above (does not include MIPS64 CPUs)
+; MIPS64-GT-R1 - MIPS64r1 and above
+
+define i32 @ctlz_i32(i32 %X) nounwind readnone {
+entry:
+; ALL-LABEL: ctlz_i32:
+
+; MIPS4-NOT: clz
+
+; MIPS32-GT-R1: clz $2, $4
+
+; MIPS64-GT-R1: clz $2, $4
+
+ %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X, i1 true)
+ ret i32 %tmp1
+}
+
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
+
+define i32 @ctlo_i32(i32 %X) nounwind readnone {
+entry:
+; ALL-LABEL: ctlo_i32:
+
+; MIPS4-NOT: clo
+
+; MIPS32-GT-R1: clo $2, $4
+
+; MIPS64-GT-R1: clo $2, $4
+
+ %neg = xor i32 %X, -1
+ %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg, i1 true)
+ ret i32 %tmp1
+}
+
+define i64 @ctlz_i64(i64 %X) nounwind readnone {
+entry:
+; ALL-LABEL: ctlz_i64:
+
+; MIPS4-NOT: dclz
+
+; MIPS32-GT-R1-DAG: clz $[[R0:[0-9]+]], $4
+; MIPS32-GT-R1-DAG: clz $[[R1:[0-9]+]], $5
+; MIPS32-GT-R1-DAG: addiu $[[R2:2+]], $[[R0]], 32
+; MIPS32-R1-R2-DAG: movn $[[R2]], $[[R1]], $5
+; MIPS32-R6-DAG: seleqz $[[R5:[0-9]+]], $[[R2]], $5
+; MIPS32-R6-DAG: selnez $[[R6:[0-9]+]], $[[R1]], $5
+; MIPS32-R6-DAG: or $2, $[[R6]], $[[R5]]
+; MIPS32-GT-R1-DAG: addiu $3, $zero, 0
+
+; MIPS64-GT-R1: dclz $2, $4
+
+ %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
+ ret i64 %tmp1
+}
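+; That is, on the 32-bit targets the i64 count is computed as
+;   hi != 0 ? clz(hi) : clz(lo) + 32
+; selected with movn on r1/r2 and with selnez/seleqz/or on r6, while the
+; upper word of the result is zeroed.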
+
+declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
+
+define i64 @ctlo_i64(i64 %X) nounwind readnone {
+entry:
+; ALL-LABEL: ctlo_i64:
+
+; MIPS4-NOT: dclo
+
+; MIPS32-GT-R1-DAG: clo $[[R0:[0-9]+]], $4
+; MIPS32-GT-R1-DAG: clo $[[R1:[0-9]+]], $5
+; MIPS32-GT-R1-DAG: addiu $[[R2:2+]], $[[R0]], 32
+; MIPS32-GT-R1-DAG: addiu $[[R3:[0-9]+]], $zero, -1
+; MIPS32-GT-R1-DAG: xor $[[R4:[0-9]+]], $5, $[[R3]]
+; MIPS32-R1-R2-DAG: movn $[[R2]], $[[R1]], $[[R4]]
+; MIPS32-R6-DAG: selnez $[[R5:[0-9]+]], $[[R1]], $[[R4]]
+; MIPS32-R6-DAG: seleqz $[[R6:[0-9]+]], $[[R2]], $[[R4]]
+; MIPS32-R6-DAG: or $2, $[[R5]], $[[R6]]
+; MIPS32-GT-R1-DAG: addiu $3, $zero, 0
+
+; MIPS64-GT-R1: dclo $2, $4
+
+ %neg = xor i64 %X, -1
+ %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
+ ret i64 %tmp1
+}
diff --git a/test/CodeGen/Mips/divrem.ll b/test/CodeGen/Mips/divrem.ll
index b631c3b..97f8360 100644
--- a/test/CodeGen/Mips/divrem.ll
+++ b/test/CodeGen/Mips/divrem.ll
@@ -1,77 +1,223 @@
-; RUN: llc -march=mips -verify-machineinstrs < %s |\
-; RUN: FileCheck %s -check-prefix=TRAP
-; RUN: llc -march=mips -mno-check-zero-division < %s |\
-; RUN: FileCheck %s -check-prefix=NOCHECK
+; RUN: llc -march=mips -mcpu=mips32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC32 -check-prefix=ACC32-TRAP
+; RUN: llc -march=mips -mcpu=mips32r2 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC32 -check-prefix=ACC32-TRAP
+; RUN: llc -march=mips -mcpu=mips32r6 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=GPR32-TRAP
+; RUN: llc -march=mips64 -mcpu=mips64 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC64 -check-prefix=ACC64-TRAP
+; RUN: llc -march=mips64 -mcpu=mips64r2 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC64 -check-prefix=ACC64-TRAP
+; RUN: llc -march=mips64 -mcpu=mips64r6 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=GPR64-TRAP
-; TRAP-LABEL: sdiv1:
-; TRAP: div $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; TRAP: teq $[[R0]], $zero, 7
-; TRAP: mflo
+; RUN: llc -march=mips -mcpu=mips32 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC32 -check-prefix=NOCHECK
+; RUN: llc -march=mips -mcpu=mips32r2 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC32 -check-prefix=NOCHECK
+; RUN: llc -march=mips -mcpu=mips32r6 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=NOCHECK
+; RUN: llc -march=mips64 -mcpu=mips64 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC64 -check-prefix=NOCHECK
+; RUN: llc -march=mips64 -mcpu=mips64r2 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC64 -check-prefix=NOCHECK
+; RUN: llc -march=mips64 -mcpu=mips64r6 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=NOCHECK
-; NOCHECK-LABEL: sdiv1:
-; NOCHECK-NOT: teq
-; NOCHECK: .end sdiv1
+; FileCheck Prefixes:
+; ALL - All targets
+; ACC32 - Accumulator based multiply/divide on 32-bit targets
+; ACC64 - Same as ACC32 but only for 64-bit targets
+; GPR32 - GPR based multiply/divide on 32-bit targets
+; GPR64 - Same as GPR32 but only for 64-bit targets
+; ACC32-TRAP - Same as ACC32 but with the divide-by-zero trap (teq) enabled
+; ACC64-TRAP - Same as ACC64 but with the divide-by-zero trap (teq) enabled
+; GPR32-TRAP - Same as GPR32 but with the divide-by-zero trap (teq) enabled
+; GPR64-TRAP - Same as GPR64 but with the divide-by-zero trap (teq) enabled
+; NOCHECK - Division by zero will not be detected
@g0 = common global i32 0, align 4
@g1 = common global i32 0, align 4
define i32 @sdiv1(i32 %a0, i32 %a1) nounwind readnone {
entry:
+; ALL-LABEL: sdiv1:
+
+; ACC32: div $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+
+; ACC64: div $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR32: div $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: div $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC32: mflo $2
+; ACC64: mflo $2
+
+; ALL: .end sdiv1
+
%div = sdiv i32 %a0, %a1
ret i32 %div
}
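+
+; That is, pre-R6 cores produce the quotient and remainder in the accumulator
+; (div $zero, ...; then mflo/mfhi), while R6 writes a GPR directly using the
+; three-operand div/mod/divu/modu forms checked above.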
-; TRAP-LABEL: srem1:
-; TRAP: div $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; TRAP: teq $[[R0]], $zero, 7
-; TRAP: mfhi
-
define i32 @srem1(i32 %a0, i32 %a1) nounwind readnone {
entry:
+; ALL-LABEL: srem1:
+
+; ACC32: div $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+
+; ACC64: div $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR32: mod $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: mod $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC32: mfhi $2
+; ACC64: mfhi $2
+
+; ALL: .end srem1
+
%rem = srem i32 %a0, %a1
ret i32 %rem
}
-; TRAP-LABEL: udiv1:
-; TRAP: divu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; TRAP: teq $[[R0]], $zero, 7
-; TRAP: mflo
-
define i32 @udiv1(i32 %a0, i32 %a1) nounwind readnone {
entry:
+; ALL-LABEL: udiv1:
+
+; ACC32: divu $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+
+; ACC64: divu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR32: divu $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: divu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC32: mflo $2
+; ACC64: mflo $2
+
+; ALL: .end udiv1
%div = udiv i32 %a0, %a1
ret i32 %div
}
-; TRAP-LABEL: urem1:
-; TRAP: divu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; TRAP: teq $[[R0]], $zero, 7
-; TRAP: mfhi
-
define i32 @urem1(i32 %a0, i32 %a1) nounwind readnone {
entry:
+; ALL-LABEL: urem1:
+
+; ACC32: divu $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+
+; ACC64: divu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR32: modu $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: modu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC32: mfhi $2
+; ACC64: mfhi $2
+
+; ALL: .end urem1
+
%rem = urem i32 %a0, %a1
ret i32 %rem
}
-; TRAP: div $zero,
define i32 @sdivrem1(i32 %a0, i32 %a1, i32* nocapture %r) nounwind {
entry:
+; ALL-LABEL: sdivrem1:
+
+; ACC32: div $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC32: mflo $2
+; ACC32: mfhi $[[R0:[0-9]+]]
+; ACC32: sw $[[R0]], 0(${{[0-9]+}})
+
+; ACC64: div $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC64: mflo $2
+; ACC64: mfhi $[[R0:[0-9]+]]
+; ACC64: sw $[[R0]], 0(${{[0-9]+}})
+
+; GPR32: mod $[[R0:[0-9]+]], $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR32: sw $[[R0]], 0(${{[0-9]+}})
+; GPR32-DAG: div $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: mod $[[R0:[0-9]+]], $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR64: sw $[[R0]], 0(${{[0-9]+}})
+; GPR64-DAG: div $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; ALL: .end sdivrem1
+
%rem = srem i32 %a0, %a1
store i32 %rem, i32* %r, align 4
%div = sdiv i32 %a0, %a1
ret i32 %div
}
-; TRAP: divu $zero,
define i32 @udivrem1(i32 %a0, i32 %a1, i32* nocapture %r) nounwind {
entry:
+; ALL-LABEL: udivrem1:
+
+; ACC32: divu $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC32: mflo $2
+; ACC32: mfhi $[[R0:[0-9]+]]
+; ACC32: sw $[[R0]], 0(${{[0-9]+}})
+
+; ACC64: divu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC64: mflo $2
+; ACC64: mfhi $[[R0:[0-9]+]]
+; ACC64: sw $[[R0]], 0(${{[0-9]+}})
+
+; GPR32: modu $[[R0:[0-9]+]], $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR32: sw $[[R0]], 0(${{[0-9]+}})
+; GPR32-DAG: divu $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; GPR64: modu $[[R0:[0-9]+]], $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR64: sw $[[R0]], 0(${{[0-9]+}})
+; GPR64-DAG: divu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; ALL: .end udivrem1
+
%rem = urem i32 %a0, %a1
store i32 %rem, i32* %r, align 4
%div = udiv i32 %a0, %a1
ret i32 %div
}
+; FIXME: It's not clear what this is supposed to test.
define i32 @killFlags() {
entry:
%0 = load i32* @g0, align 4
@@ -79,3 +225,164 @@ entry:
%div = sdiv i32 %0, %1
ret i32 %div
}
+
+define i64 @sdiv2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; ALL-LABEL: sdiv2:
+
+; ACC32: lw $25, %call16(__divdi3)(
+; ACC32: jalr $25
+
+; ACC64: ddiv $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR64: ddiv $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC64: mflo $2
+
+; ALL: .end sdiv2
+
+ %div = sdiv i64 %a0, %a1
+ ret i64 %div
+}
+
+define i64 @srem2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; ALL-LABEL: srem2:
+
+; ACC32: lw $25, %call16(__moddi3)(
+; ACC32: jalr $25
+
+; ACC64: ddiv $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR64: dmod $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC64: mfhi $2
+
+; ALL: .end srem2
+
+ %rem = srem i64 %a0, %a1
+ ret i64 %rem
+}
+
+define i64 @udiv2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; ALL-LABEL: udiv2:
+
+; ACC32: lw $25, %call16(__udivdi3)(
+; ACC32: jalr $25
+
+; ACC64: ddivu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR64: ddivu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC64: mflo $2
+
+; ALL: .end udiv2
+ %div = udiv i64 %a0, %a1
+ ret i64 %div
+}
+
+define i64 @urem2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; ALL-LABEL: urem2:
+
+; ACC32: lw $25, %call16(__umoddi3)(
+; ACC32: jalr $25
+
+; ACC64: ddivu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR64: dmodu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC64: mfhi $2
+
+; ALL: .end urem2
+
+ %rem = urem i64 %a0, %a1
+ ret i64 %rem
+}
+
+define i64 @sdivrem2(i64 %a0, i64 %a1, i64* nocapture %r) nounwind {
+entry:
+; ALL-LABEL: sdivrem2:
+
+; sdivrem2 is too complex to effectively check. We can at least check for the
+; calls though.
+; ACC32: lw $25, %call16(__moddi3)(
+; ACC32: jalr $25
+; ACC32: lw $25, %call16(__divdi3)(
+; ACC32: jalr $25
+
+; ACC64: ddiv $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC64: mflo $2
+; ACC64: mfhi $[[R0:[0-9]+]]
+; ACC64: sd $[[R0]], 0(${{[0-9]+}})
+
+; GPR64: dmod $[[R0:[0-9]+]], $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR64: sd $[[R0]], 0(${{[0-9]+}})
+
+; GPR64-DAG: ddiv $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; ALL: .end sdivrem2
+
+ %rem = srem i64 %a0, %a1
+ store i64 %rem, i64* %r, align 8
+ %div = sdiv i64 %a0, %a1
+ ret i64 %div
+}
+
+define i64 @udivrem2(i64 %a0, i64 %a1, i64* nocapture %r) nounwind {
+entry:
+; ALL-LABEL: udivrem2:
+
+; udivrem2 is too complex to effectively check. We can at least check for the
+; calls though.
+; ACC32: lw $25, %call16(__umoddi3)(
+; ACC32: jalr $25
+; ACC32: lw $25, %call16(__udivdi3)(
+; ACC32: jalr $25
+
+; ACC64: ddivu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC64: mflo $2
+; ACC64: mfhi $[[R0:[0-9]+]]
+; ACC64: sd $[[R0]], 0(${{[0-9]+}})
+
+; GPR64: dmodu $[[R0:[0-9]+]], $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR64: sd $[[R0]], 0(${{[0-9]+}})
+
+; GPR64-DAG: ddivu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; ALL: .end udivrem2
+
+ %rem = urem i64 %a0, %a1
+ store i64 %rem, i64* %r, align 8
+ %div = udiv i64 %a0, %a1
+ ret i64 %div
+}
diff --git a/test/CodeGen/Mips/dsp-r1.ll b/test/CodeGen/Mips/dsp-r1.ll
index acdd17d..fbd9703 100644
--- a/test/CodeGen/Mips/dsp-r1.ll
+++ b/test/CodeGen/Mips/dsp-r1.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mattr=+dsp < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32 -mattr=+dsp < %s | FileCheck %s
define i32 @test__builtin_mips_extr_w1(i32 %i0, i32, i64 %a0) nounwind {
entry:
diff --git a/test/CodeGen/Mips/eh-return32.ll b/test/CodeGen/Mips/eh-return32.ll
index c3003b3..748050c 100644
--- a/test/CodeGen/Mips/eh-return32.ll
+++ b/test/CodeGen/Mips/eh-return32.ll
@@ -1,4 +1,6 @@
-; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mipsel -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mipsel -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=R6
declare void @llvm.eh.return.i32(i32, i8*)
declare void @foo(...)
@@ -9,7 +11,7 @@ entry:
call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
unreachable
-; CHECK: f1
+; CHECK: f1:
; CHECK: addiu $sp, $sp, -[[spoffset:[0-9]+]]
; check that $a0-$a3 are saved on stack.
@@ -41,7 +43,8 @@ entry:
; CHECK: addiu $sp, $sp, [[spoffset]]
; CHECK: move $25, $2
; CHECK: move $ra, $2
-; CHECK: jr $ra
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
; CHECK: addu $sp, $sp, $3
}
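+
+; Note: MIPS32r6 removed the JR encoding; jr $ra is accepted as an alias that
+; assembles to jalr $zero, $ra, which is why -asm-show-inst reports a JALR
+; MCInst on R6.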
@@ -50,7 +53,7 @@ entry:
call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
unreachable
-; CHECK: f2
+; CHECK: f2:
; CHECK: addiu $sp, $sp, -[[spoffset:[0-9]+]]
; check that $a0-$a3 are saved on stack.
@@ -80,6 +83,7 @@ entry:
; CHECK: addiu $sp, $sp, [[spoffset]]
; CHECK: move $25, $2
; CHECK: move $ra, $2
-; CHECK: jr $ra
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
; CHECK: addu $sp, $sp, $3
}
diff --git a/test/CodeGen/Mips/eh-return64.ll b/test/CodeGen/Mips/eh-return64.ll
index 8c5af50..74a4323 100644
--- a/test/CodeGen/Mips/eh-return64.ll
+++ b/test/CodeGen/Mips/eh-return64.ll
@@ -1,5 +1,7 @@
-; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mips64el -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mips64el -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mips64el -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=R6
declare void @llvm.eh.return.i64(i64, i8*)
declare void @foo(...)
@@ -10,7 +12,7 @@ entry:
call void @llvm.eh.return.i64(i64 %offset, i8* %handler)
unreachable
-; CHECK: f1
+; CHECK: f1:
; CHECK: daddiu $sp, $sp, -[[spoffset:[0-9]+]]
; check that $a0-$a3 are saved on stack.
@@ -42,9 +44,9 @@ entry:
; CHECK: daddiu $sp, $sp, [[spoffset]]
; CHECK: move $25, $2
; CHECK: move $ra, $2
-; CHECK: jr $ra
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
; CHECK: daddu $sp, $sp, $3
-
}
define void @f2(i64 %offset, i8* %handler) {
@@ -52,7 +54,7 @@ entry:
call void @llvm.eh.return.i64(i64 %offset, i8* %handler)
unreachable
-; CHECK: f2
+; CHECK: f2:
; CHECK: .cfi_startproc
; CHECK: daddiu $sp, $sp, -[[spoffset:[0-9]+]]
; CHECK: .cfi_def_cfa_offset [[spoffset]]
@@ -84,7 +86,8 @@ entry:
; CHECK: daddiu $sp, $sp, [[spoffset]]
; CHECK: move $25, $2
; CHECK: move $ra, $2
-; CHECK: jr $ra
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
; CHECK: daddu $sp, $sp, $3
; CHECK: .cfi_endproc
}
diff --git a/test/CodeGen/Mips/ehframe-indirect.ll b/test/CodeGen/Mips/ehframe-indirect.ll
new file mode 100644
index 0000000..e78497a
--- /dev/null
+++ b/test/CodeGen/Mips/ehframe-indirect.ll
@@ -0,0 +1,34 @@
+; RUN: llc -mtriple=mipsel-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-android < %s | FileCheck %s
+
+define i32 @main() {
+; CHECK: .cfi_startproc
+; CHECK: .cfi_personality 128, DW.ref.__gxx_personality_v0
+
+entry:
+ invoke void @foo() to label %cont unwind label %lpad
+; CHECK: foo
+; CHECK: jalr
+
+lpad:
+ %0 = landingpad { i8*, i32 } personality i8*
+ bitcast (i32 (...)* @__gxx_personality_v0 to i8*) catch i8* null
+ ret i32 0
+
+cont:
+ ret i32 0
+}
+; CHECK: .cfi_endproc
+
+declare i32 @__gxx_personality_v0(...)
+
+declare void @foo()
+
+; CHECK: .hidden DW.ref.__gxx_personality_v0
+; CHECK: .weak DW.ref.__gxx_personality_v0
+; CHECK: .section .data.DW.ref.__gxx_personality_v0,"aGw",@progbits,DW.ref.__gxx_personality_v0,comdat
+; CHECK: .align 2
+; CHECK: .type DW.ref.__gxx_personality_v0,@object
+; CHECK: .size DW.ref.__gxx_personality_v0, 4
+; CHECK: DW.ref.__gxx_personality_v0:
+; CHECK: .4byte __gxx_personality_v0
diff --git a/test/CodeGen/Mips/fcmp.ll b/test/CodeGen/Mips/fcmp.ll
new file mode 100644
index 0000000..b775983
--- /dev/null
+++ b/test/CodeGen/Mips/fcmp.ll
@@ -0,0 +1,783 @@
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32-C
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32-C
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32-CMP
+; RUN: llc < %s -march=mips64el -mcpu=mips4 | FileCheck %s -check-prefix=ALL -check-prefix=64-C
+; RUN: llc < %s -march=mips64el -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=64-C
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=64-C
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMP
+
+define i32 @false_f32(float %a, float %b) nounwind {
+; ALL-LABEL: false_f32:
+; ALL: addiu $2, $zero, 0
+
+ %1 = fcmp false float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @oeq_f32(float %a, float %b) nounwind {
+; ALL-LABEL: oeq_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.eq.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.eq.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp oeq float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ogt_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ogt_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ule.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ule.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ogt float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
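+
+; Note how the two ISA families reach a > b differently: pre-R6 tests the
+; inverse condition (c.ule.s) and selects with movf, while R6 keeps the
+; sense but swaps the operands of cmp.lt.s. The same pattern repeats for
+; the remaining ordered/unordered predicates below.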
+
+define i32 @oge_f32(float %a, float %b) nounwind {
+; ALL-LABEL: oge_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ult.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ult.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp oge float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @olt_f32(float %a, float %b) nounwind {
+; ALL-LABEL: olt_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.olt.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.olt.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp olt float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ole_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ole_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ole.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ole.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ole float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @one_f32(float %a, float %b) nounwind {
+; ALL-LABEL: one_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ueq.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ueq.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp one float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ord_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ord_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.un.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.un.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp ord float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ueq_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ueq_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ueq.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ueq.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ueq float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ugt_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ugt_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ole.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ole.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ugt float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @uge_f32(float %a, float %b) nounwind {
+; ALL-LABEL: uge_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.olt.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.olt.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp uge float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ult_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ult_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ult.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ult.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ult float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ule_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ule_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ule.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ule.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ule float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @une_f32(float %a, float %b) nounwind {
+; ALL-LABEL: une_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.eq.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.eq.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp une float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @uno_f32(float %a, float %b) nounwind {
+; ALL-LABEL: uno_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.un.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.un.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp uno float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @true_f32(float %a, float %b) nounwind {
+; ALL-LABEL: true_f32:
+; ALL: addiu $2, $zero, 1
+
+ %1 = fcmp true float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @false_f64(double %a, double %b) nounwind {
+; ALL-LABEL: false_f64:
+; ALL: addiu $2, $zero, 0
+
+ %1 = fcmp false double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @oeq_f64(double %a, double %b) nounwind {
+; ALL-LABEL: oeq_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.eq.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.eq.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp oeq double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ogt_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ogt_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ule.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ule.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ogt double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @oge_f64(double %a, double %b) nounwind {
+; ALL-LABEL: oge_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ult.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ult.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp oge double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @olt_f64(double %a, double %b) nounwind {
+; ALL-LABEL: olt_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.olt.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.olt.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp olt double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ole_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ole_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ole.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ole.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ole double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @one_f64(double %a, double %b) nounwind {
+; ALL-LABEL: one_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ueq.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ueq.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp one double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ord_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ord_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.un.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.un.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp ord double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ueq_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ueq_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ueq.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ueq.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ueq double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ugt_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ugt_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ole.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ole.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ugt double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @uge_f64(double %a, double %b) nounwind {
+; ALL-LABEL: uge_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.olt.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.olt.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp uge double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ult_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ult_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ult.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ult.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ult double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ule_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ule_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ule.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ule.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ule double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @une_f64(double %a, double %b) nounwind {
+; ALL-LABEL: une_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.eq.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.eq.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp une double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @uno_f64(double %a, double %b) nounwind {
+; ALL-LABEL: uno_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.un.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.un.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp uno double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @true_f64(double %a, double %b) nounwind {
+; ALL-LABEL: true_f64:
+; ALL: addiu $2, $zero, 1
+
+ %1 = fcmp true double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
diff --git a/test/CodeGen/Mips/fcopysign.ll b/test/CodeGen/Mips/fcopysign.ll
index 44c4117..3a9d9c7 100644
--- a/test/CodeGen/Mips/fcopysign.ll
+++ b/test/CodeGen/Mips/fcopysign.ll
@@ -17,7 +17,7 @@ entry:
; 32R2: ext $[[EXT:[0-9]+]], ${{[0-9]+}}, 31, 1
; 32R2: ins $[[INS:[0-9]+]], $[[EXT]], 31, 1
-; 32R2: mtc1 $[[INS]], $f1
+; 32R2: mthc1 $[[INS]], $f0
; 64: daddiu $[[T0:[0-9]+]], $zero, 1
; 64: dsll $[[MSK1:[0-9]+]], $[[T0]], 63
diff --git a/test/CodeGen/Mips/fmadd1.ll b/test/CodeGen/Mips/fmadd1.ll
index a9a8e21..271631e 100644
--- a/test/CodeGen/Mips/fmadd1.ll
+++ b/test/CodeGen/Mips/fmadd1.ll
@@ -5,15 +5,54 @@
; IEEE 754 (1985) and IEEE 754 (2008). These instructions are therefore only
; available when -enable-no-nans-fp-math is given.
-; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -enable-no-nans-fp-math | FileCheck %s -check-prefix=32R2 -check-prefix=CHECK
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=64R2 -check-prefix=CHECK
-; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=32R2NAN -check-prefix=CHECK
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=64R2NAN -check-prefix=CHECK
+; RUN: llc < %s -march=mipsel -mcpu=mips32 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32 -check-prefix=32-NONAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32R2 -check-prefix=32R2-NONAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32R6 -check-prefix=32R6-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64 -check-prefix=64-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64R2 -check-prefix=64R2-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64R6 -check-prefix=64R6-NONAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32 -check-prefix=32-NAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32R2 -check-prefix=32R2-NAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32R6 -check-prefix=32R6-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64 -check-prefix=64-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64R2 -check-prefix=64R2-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64R6 -check-prefix=64R6-NAN
define float @FOO0float(float %a, float %b, float %c) nounwind readnone {
entry:
-; CHECK-LABEL: FOO0float:
-; CHECK: madd.s
+; ALL-LABEL: FOO0float:
+
+; 32-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 32R2: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2: madd.s $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2: add.s $f0, $[[T1]], $[[T2]]
+
+; 32R6-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 64R2: madd.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2: add.s $f0, $[[T0]], $[[T1]]
+
+; 64R6-DAG: mul.s $[[T0:f[0-9]+]], $f12, $f13
+; 64R6-DAG: add.s $[[T1:f[0-9]+]], $[[T0]], $f14
+; 64R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: add.s $f0, $[[T1]], $[[T2]]
+
%mul = fmul float %a, %b
%add = fadd float %mul, %c
%add1 = fadd float %add, 0.000000e+00
@@ -22,8 +61,39 @@ entry:
define float @FOO1float(float %a, float %b, float %c) nounwind readnone {
entry:
-; CHECK-LABEL: FOO1float:
-; CHECK: msub.s
+; ALL-LABEL: FOO1float:
+
+; 32-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 32R2: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2: msub.s $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2: add.s $f0, $[[T1]], $[[T2]]
+
+; 32R6-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 64R2: msub.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2: add.s $f0, $[[T0]], $[[T1]]
+
+; 64R6-DAG: mul.s $[[T0:f[0-9]+]], $f12, $f13
+; 64R6-DAG: sub.s $[[T1:f[0-9]+]], $[[T0]], $f14
+; 64R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: add.s $f0, $[[T1]], $[[T2]]
+
%mul = fmul float %a, %b
%sub = fsub float %mul, %c
%add = fadd float %sub, 0.000000e+00
@@ -32,11 +102,44 @@ entry:
define float @FOO2float(float %a, float %b, float %c) nounwind readnone {
entry:
-; CHECK-LABEL: FOO2float:
-; 32R2: nmadd.s
-; 64R2: nmadd.s
-; 32R2NAN: madd.s
-; 64R2NAN: madd.s
+; ALL-LABEL: FOO2float:
+
+; 32-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 32R2-NONAN: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2-NONAN: nmadd.s $f0, $[[T0]], $f12, $f14
+
+; 32R2-NAN: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2-NAN: madd.s $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2-NAN: sub.s $f0, $[[T2]], $[[T1]]
+
+; 32R6-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 64R2-NONAN: nmadd.s $f0, $f14, $f12, $f13
+
+; 64R2-NAN: madd.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2-NAN: sub.s $f0, $[[T1]], $[[T0]]
+
+; 64R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
%mul = fmul float %a, %b
%add = fadd float %mul, %c
%sub = fsub float 0.000000e+00, %add
@@ -45,11 +148,36 @@ entry:
define float @FOO3float(float %a, float %b, float %c) nounwind readnone {
entry:
-; CHECK-LABEL: FOO3float:
-; 32R2: nmsub.s
-; 64R2: nmsub.s
-; 32R2NAN: msub.s
-; 64R2NAN: msub.s
+; ALL-LABEL: FOO3float:
+
+; 32-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 32R2-NONAN: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2-NONAN: nmsub.s $f0, $[[T0]], $f12, $f14
+
+; 32R2-NAN: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2-NAN: msub.s $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2-NAN: sub.s $f0, $[[T2]], $[[T1]]
+
+; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 64R2-NAN: msub.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2-NAN: sub.s $f0, $[[T1]], $[[T0]]
+
+; 64R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
%mul = fmul float %a, %b
%sub = fsub float %mul, %c
%sub1 = fsub float 0.000000e+00, %sub
@@ -58,8 +186,40 @@ entry:
define double @FOO10double(double %a, double %b, double %c) nounwind readnone {
entry:
-; CHECK-LABEL: FOO10double:
-; CHECK: madd.d
+; ALL-LABEL: FOO10double:
+
+; 32-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 32R2: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2: madd.d $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2: mthc1 $zero, $[[T2]]
+; 32R2: add.d $f0, $[[T1]], $[[T2]]
+
+; 32R6-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 64R2: madd.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2: add.d $f0, $[[T0]], $[[T1]]
+
+; 64R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: add.d $f0, $[[T1]], $[[T2]]
+
%mul = fmul double %a, %b
%add = fadd double %mul, %c
%add1 = fadd double %add, 0.000000e+00
@@ -68,8 +228,40 @@ entry:
define double @FOO11double(double %a, double %b, double %c) nounwind readnone {
entry:
-; CHECK-LABEL: FOO11double:
-; CHECK: msub.d
+; ALL-LABEL: FOO11double:
+
+; 32-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 32R2: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2: msub.d $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2: mthc1 $zero, $[[T2]]
+; 32R2: add.d $f0, $[[T1]], $[[T2]]
+
+; 32R6-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 64R2: msub.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2: add.d $f0, $[[T0]], $[[T1]]
+
+; 64R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: add.d $f0, $[[T1]], $[[T2]]
+
%mul = fmul double %a, %b
%sub = fsub double %mul, %c
%add = fadd double %sub, 0.000000e+00
@@ -78,11 +270,45 @@ entry:
define double @FOO12double(double %a, double %b, double %c) nounwind readnone {
entry:
-; CHECK-LABEL: FOO12double:
-; 32R2: nmadd.d
-; 64R2: nmadd.d
-; 32R2NAN: madd.d
-; 64R2NAN: madd.d
+; ALL-LABEL: FOO12double:
+
+; 32-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 32R2-NONAN: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2-NONAN: nmadd.d $f0, $[[T0]], $f12, $f14
+
+; 32R2-NAN: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2-NAN: madd.d $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2-NAN: mthc1 $zero, $[[T2]]
+; 32R2-NAN: sub.d $f0, $[[T2]], $[[T1]]
+
+; 32R6-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 64R2-NONAN: nmadd.d $f0, $f14, $f12, $f13
+
+; 64R2-NAN: madd.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2-NAN: sub.d $f0, $[[T1]], $[[T0]]
+
+; 64R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
%mul = fmul double %a, %b
%add = fadd double %mul, %c
%sub = fsub double 0.000000e+00, %add
@@ -91,11 +317,45 @@ entry:
define double @FOO13double(double %a, double %b, double %c) nounwind readnone {
entry:
-; CHECK-LABEL: FOO13double:
-; 32R2: nmsub.d
-; 64R2: nmsub.d
-; 32R2NAN: msub.d
-; 64R2NAN: msub.d
+; ALL-LABEL: FOO13double:
+
+; 32-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 32R2-NONAN: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2-NONAN: nmsub.d $f0, $[[T0]], $f12, $f14
+
+; 32R2-NAN: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2-NAN: msub.d $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2-NAN: mthc1 $zero, $[[T2]]
+; 32R2-NAN: sub.d $f0, $[[T2]], $[[T1]]
+
+; 32R6-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 64R2-NONAN: nmsub.d $f0, $f14, $f12, $f13
+
+; 64R2-NAN: msub.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2-NAN: sub.d $f0, $[[T1]], $[[T0]]
+
+; 64R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
%mul = fmul double %a, %b
%sub = fsub double %mul, %c
%sub1 = fsub double 0.000000e+00, %sub
diff --git a/test/CodeGen/Mips/fp-indexed-ls.ll b/test/CodeGen/Mips/fp-indexed-ls.ll
index d8c37e7..787e131 100644
--- a/test/CodeGen/Mips/fp-indexed-ls.ll
+++ b/test/CodeGen/Mips/fp-indexed-ls.ll
@@ -1,6 +1,13 @@
-; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s
-; RUN: llc -mtriple=mipsel-none-nacl-gnu -mcpu=mips32r2 < %s \
-; RUN: | FileCheck %s -check-prefix=CHECK-NACL
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R1
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R2
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R6
+; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64r6 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64R6
+
+; Check that [ls][dwu]xc1 are not emitted for NaCl.
+; RUN: llc -mtriple=mipsel-none-nacl-gnu -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=CHECK-NACL
%struct.S = type <{ [4 x float] }>
%struct.S2 = type <{ [4 x double] }>
@@ -14,8 +21,30 @@
define float @foo0(float* nocapture %b, i32 %o) nounwind readonly {
entry:
-; CHECK: lwxc1
+; ALL-LABEL: foo0:
+
+; MIPS32R1: sll $[[T1:[0-9]+]], $5, 2
+; MIPS32R1: addu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS32R1: lwc1 $f0, 0($[[T3]])
+
+; MIPS32R2: sll $[[T1:[0-9]+]], $5, 2
+; MIPS32R2: lwxc1 $f0, $[[T1]]($4)
+
+; MIPS32R6: sll $[[T1:[0-9]+]], $5, 2
+; MIPS32R6: addu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS32R6: lwc1 $f0, 0($[[T3]])
+
+; MIPS4: sll $[[T0:[0-9]+]], $5, 0
+; MIPS4: dsll $[[T1:[0-9]+]], $[[T0]], 2
+; MIPS4: lwxc1 $f0, $[[T1]]($4)
+
+; MIPS64R6: sll $[[T0:[0-9]+]], $5, 0
+; MIPS64R6: dsll $[[T1:[0-9]+]], $[[T0]], 2
+; MIPS64R6: daddu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS64R6: lwc1 $f0, 0($[[T3]])
+
; CHECK-NACL-NOT: lwxc1
+
%arrayidx = getelementptr inbounds float* %b, i32 %o
%0 = load float* %arrayidx, align 4
ret float %0
@@ -23,8 +52,30 @@ entry:
define double @foo1(double* nocapture %b, i32 %o) nounwind readonly {
entry:
-; CHECK: ldxc1
+; ALL-LABEL: foo1:
+
+; MIPS32R1: sll $[[T1:[0-9]+]], $5, 3
+; MIPS32R1: addu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS32R1: ldc1 $f0, 0($[[T3]])
+
+; MIPS32R2: sll $[[T1:[0-9]+]], $5, 3
+; MIPS32R2: ldxc1 $f0, $[[T1]]($4)
+
+; MIPS32R6: sll $[[T1:[0-9]+]], $5, 3
+; MIPS32R6: addu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS32R6: ldc1 $f0, 0($[[T3]])
+
+; MIPS4: sll $[[T0:[0-9]+]], $5, 0
+; MIPS4: dsll $[[T1:[0-9]+]], $[[T0]], 3
+; MIPS4: ldxc1 $f0, $[[T1]]($4)
+
+; MIPS64R6: sll $[[T0:[0-9]+]], $5, 0
+; MIPS64R6: dsll $[[T1:[0-9]+]], $[[T0]], 3
+; MIPS64R6: daddu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS64R6: ldc1 $f0, 0($[[T3]])
+
; CHECK-NACL-NOT: ldxc1
+
%arrayidx = getelementptr inbounds double* %b, i32 %o
%0 = load double* %arrayidx, align 8
ret double %0
@@ -32,7 +83,23 @@ entry:
define float @foo2(i32 %b, i32 %c) nounwind readonly {
entry:
-; CHECK-NOT: luxc1
+; ALL-LABEL: foo2:
+
+; luxc1 did not exist in MIPS32r1
+; MIPS32R1-NOT: luxc1
+
+; luxc1 is a misnomer since it aligns the given pointer downwards and performs
+; an aligned load. We mustn't use it to handle unaligned loads.
+; MIPS32R2-NOT: luxc1
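+; (Illustration, assumed semantics rather than a checked line: luxc1 computes
+; base + index and clears the low three bits of the result, so it performs an
+; aligned 8-byte load from just below a misaligned address rather than an
+; unaligned load from the address itself.)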
+
+; luxc1 was removed in MIPS32r6
+; MIPS32R6-NOT: luxc1
+
+; MIPS4-NOT: luxc1
+
+; luxc1 was removed in MIPS64r6
+; MIPS64R6-NOT: luxc1
+
%arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
%0 = load float* %arrayidx1, align 1
ret float %0
@@ -40,8 +107,28 @@ entry:
define void @foo3(float* nocapture %b, i32 %o) nounwind {
entry:
-; CHECK: swxc1
+; ALL-LABEL: foo3:
+
+; MIPS32R1-DAG: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R1-DAG: addu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS32R1-DAG: swc1 $[[T0]], 0($[[T1]])
+
+; MIPS32R2: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R2: swxc1 $[[T0]], ${{[0-9]+}}($4)
+
+; MIPS32R6-DAG: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R6-DAG: addu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS32R6-DAG: swc1 $[[T0]], 0($[[T1]])
+
+; MIPS4: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS4: swxc1 $[[T0]], ${{[0-9]+}}($4)
+
+; MIPS64R6-DAG: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS64R6-DAG: daddu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS64R6-DAG: swc1 $[[T0]], 0($[[T1]])
+
; CHECK-NACL-NOT: swxc1
+
%0 = load float* @gf, align 4
%arrayidx = getelementptr inbounds float* %b, i32 %o
store float %0, float* %arrayidx, align 4
@@ -50,8 +137,28 @@ entry:
define void @foo4(double* nocapture %b, i32 %o) nounwind {
entry:
-; CHECK: sdxc1
+; ALL-LABEL: foo4:
+
+; MIPS32R1-DAG: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R1-DAG: addu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS32R1-DAG: sdc1 $[[T0]], 0($[[T1]])
+
+; MIPS32R2: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R2: sdxc1 $[[T0]], ${{[0-9]+}}($4)
+
+; MIPS32R6-DAG: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R6-DAG: addu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS32R6-DAG: sdc1 $[[T0]], 0($[[T1]])
+
+; MIPS4: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS4: sdxc1 $[[T0]], ${{[0-9]+}}($4)
+
+; MIPS64R6-DAG: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS64R6-DAG: daddu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS64R6-DAG: sdc1 $[[T0]], 0($[[T1]])
+
; CHECK-NACL-NOT: sdxc1
+
%0 = load double* @gd, align 8
%arrayidx = getelementptr inbounds double* %b, i32 %o
store double %0, double* %arrayidx, align 8
@@ -60,7 +167,18 @@ entry:
define void @foo5(i32 %b, i32 %c) nounwind {
entry:
-; CHECK-NOT: suxc1
+; ALL-LABEL: foo5:
+
+; MIPS32R1-NOT: suxc1
+
+; MIPS32R2-NOT: suxc1
+
+; MIPS32R6-NOT: suxc1
+
+; MIPS4-NOT: suxc1
+
+; MIPS64R6-NOT: suxc1
+
%0 = load float* @gf, align 4
%arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
store float %0, float* %arrayidx1, align 1
@@ -69,8 +187,18 @@ entry:
define double @foo6(i32 %b, i32 %c) nounwind readonly {
entry:
-; CHECK: foo6
-; CHECK-NOT: luxc1
+; ALL-LABEL: foo6:
+
+; MIPS32R1-NOT: luxc1
+
+; MIPS32R2-NOT: luxc1
+
+; MIPS32R6-NOT: luxc1
+
+; MIPS4-NOT: luxc1
+
+; MIPS64R6-NOT: luxc1
+
%arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
%0 = load double* %arrayidx1, align 1
ret double %0
@@ -78,8 +206,18 @@ entry:
define void @foo7(i32 %b, i32 %c) nounwind {
entry:
-; CHECK: foo7
-; CHECK-NOT: suxc1
+; ALL-LABEL: foo7:
+
+; MIPS32R1-NOT: suxc1
+
+; MIPS32R2-NOT: suxc1
+
+; MIPS32R6-NOT: suxc1
+
+; MIPS4-NOT: suxc1
+
+; MIPS64R6-NOT: suxc1
+
%0 = load double* @gd, align 8
%arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
store double %0, double* %arrayidx1, align 1
@@ -88,16 +226,36 @@ entry:
define float @foo8() nounwind readonly {
entry:
-; CHECK: foo8
-; CHECK-NOT: luxc1
+; ALL-LABEL: foo8:
+
+; MIPS32R1-NOT: luxc1
+
+; MIPS32R2-NOT: luxc1
+
+; MIPS32R6-NOT: luxc1
+
+; MIPS4-NOT: luxc1
+
+; MIPS64R6-NOT: luxc1
+
%0 = load float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
ret float %0
}
define void @foo9(float %f) nounwind {
entry:
-; CHECK: foo9
-; CHECK-NOT: suxc1
+; ALL-LABEL: foo9:
+
+; MIPS32R1-NOT: suxc1
+
+; MIPS32R2-NOT: suxc1
+
+; MIPS32R6-NOT: suxc1
+
+; MIPS4-NOT: suxc1
+
+; MIPS64R6-NOT: suxc1
+
store float %f, float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
ret void
}
diff --git a/test/CodeGen/Mips/fpbr.ll b/test/CodeGen/Mips/fpbr.ll
index a136557..311b830 100644
--- a/test/CodeGen/Mips/fpbr.ll
+++ b/test/CodeGen/Mips/fpbr.ll
@@ -1,9 +1,25 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=FCC -check-prefix=32-FCC
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=FCC -check-prefix=32-FCC
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=GPR -check-prefix=32-GPR
+; RUN: llc < %s -march=mips64el -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=FCC -check-prefix=64-FCC
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=FCC -check-prefix=64-FCC
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=GPR -check-prefix=64-GPR
define void @func0(float %f2, float %f3) nounwind {
entry:
-; CHECK: c.eq.s
-; CHECK: bc1f
+; ALL-LABEL: func0:
+
+; 32-FCC: c.eq.s $f12, $f14
+; 64-FCC: c.eq.s $f12, $f13
+; FCC: bc1f $BB0_2
+
+; 32-GPR: cmp.eq.s $[[FGRCC:f[0-9]+]], $f12, $f14
+; 64-GPR: cmp.eq.s $[[FGRCC:f[0-9]+]], $f12, $f13
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; FIXME: We ought to be able to transform not+bnez -> beqz
+; GPR: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB0_2
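+; (Illustration of the FIXME, not a checked line: mfc1 copies the all-zeros or
+; all-ones mask produced by cmp.eq.s, and for those two values the sequence
+; "not $t, $t; bnez $t, L" is equivalent to the single instruction
+; "beqz $t, L".)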
+
%cmp = fcmp oeq float %f2, %f3
br i1 %cmp, label %if.then, label %if.else
@@ -25,8 +41,18 @@ declare void @g1(...)
define void @func1(float %f2, float %f3) nounwind {
entry:
-; CHECK: c.olt.s
-; CHECK: bc1f
+; ALL-LABEL: func1:
+
+; 32-FCC: c.olt.s $f12, $f14
+; 64-FCC: c.olt.s $f12, $f13
+; FCC: bc1f $BB1_2
+
+; 32-GPR: cmp.ule.s $[[FGRCC:f[0-9]+]], $f14, $f12
+; 64-GPR: cmp.ule.s $[[FGRCC:f[0-9]+]], $f13, $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB1_2
+
%cmp = fcmp olt float %f2, %f3
br i1 %cmp, label %if.then, label %if.else
@@ -44,8 +70,18 @@ if.end: ; preds = %if.else, %if.then
define void @func2(float %f2, float %f3) nounwind {
entry:
-; CHECK: c.ole.s
-; CHECK: bc1t
+; ALL-LABEL: func2:
+
+; 32-FCC: c.ole.s $f12, $f14
+; 64-FCC: c.ole.s $f12, $f13
+; FCC: bc1t $BB2_2
+
+; 32-GPR: cmp.ult.s $[[FGRCC:f[0-9]+]], $f14, $f12
+; 64-GPR: cmp.ult.s $[[FGRCC:f[0-9]+]], $f13, $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: beqz $[[GPRCC]], $BB2_2
+
%cmp = fcmp ugt float %f2, %f3
br i1 %cmp, label %if.else, label %if.then
@@ -63,8 +99,19 @@ if.end: ; preds = %if.else, %if.then
define void @func3(double %f2, double %f3) nounwind {
entry:
-; CHECK: c.eq.d
-; CHECK: bc1f
+; ALL-LABEL: func3:
+
+; 32-FCC: c.eq.d $f12, $f14
+; 64-FCC: c.eq.d $f12, $f13
+; FCC: bc1f $BB3_2
+
+; 32-GPR: cmp.eq.d $[[FGRCC:f[0-9]+]], $f12, $f14
+; 64-GPR: cmp.eq.d $[[FGRCC:f[0-9]+]], $f12, $f13
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; FIXME: We ought to be able to transform not+bnez -> beqz
+; GPR: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB3_2
+
%cmp = fcmp oeq double %f2, %f3
br i1 %cmp, label %if.then, label %if.else
@@ -82,8 +129,18 @@ if.end: ; preds = %if.else, %if.then
define void @func4(double %f2, double %f3) nounwind {
entry:
-; CHECK: c.olt.d
-; CHECK: bc1f
+; ALL-LABEL: func4:
+
+; 32-FCC: c.olt.d $f12, $f14
+; 64-FCC: c.olt.d $f12, $f13
+; FCC: bc1f $BB4_2
+
+; 32-GPR: cmp.ule.d $[[FGRCC:f[0-9]+]], $f14, $f12
+; 64-GPR: cmp.ule.d $[[FGRCC:f[0-9]+]], $f13, $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB4_2
+
%cmp = fcmp olt double %f2, %f3
br i1 %cmp, label %if.then, label %if.else
@@ -101,8 +158,18 @@ if.end: ; preds = %if.else, %if.then
define void @func5(double %f2, double %f3) nounwind {
entry:
-; CHECK: c.ole.d
-; CHECK: bc1t
+; ALL-LABEL: func5:
+
+; 32-FCC: c.ole.d $f12, $f14
+; 64-FCC: c.ole.d $f12, $f13
+; FCC: bc1t $BB5_2
+
+; 32-GPR: cmp.ult.d $[[FGRCC:f[0-9]+]], $f14, $f12
+; 64-GPR: cmp.ult.d $[[FGRCC:f[0-9]+]], $f13, $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: beqz $[[GPRCC]], $BB5_2
+
%cmp = fcmp ugt double %f2, %f3
br i1 %cmp, label %if.else, label %if.then
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
index 9464918..a67ddce 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
@@ -1,6 +1,7 @@
; Positive test for inline register constraints
;
-; RUN: llc -march=mipsel < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Mips/lit.local.cfg b/test/CodeGen/Mips/lit.local.cfg
index 1fa54b4..a3183a2 100644
--- a/test/CodeGen/Mips/lit.local.cfg
+++ b/test/CodeGen/Mips/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Mips' in targets:
+if not 'Mips' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Mips/llvm-ir/call.ll b/test/CodeGen/Mips/llvm-ir/call.ll
new file mode 100644
index 0000000..4cbf43c
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/call.ll
@@ -0,0 +1,166 @@
+; Test the 'call' instruction and its tail-call variants.
+
+; FIXME: We should remove the need for -enable-mips-tail-calls
+; RUN: llc -march=mips -mcpu=mips32 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+; RUN: llc -march=mips -mcpu=mips32r2 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+; RUN: llc -march=mips -mcpu=mips32r6 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+; RUN: llc -march=mips64 -mcpu=mips4 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+; RUN: llc -march=mips64 -mcpu=mips64 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+; RUN: llc -march=mips64 -mcpu=mips64r2 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+; RUN: llc -march=mips64 -mcpu=mips64r6 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+
+declare void @extern_void_void()
+declare i32 @extern_i32_void()
+declare float @extern_float_void()
+
+define i32 @call_void_void() {
+; ALL-LABEL: call_void_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_void_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_void_void)($gp)
+
+; ALL: jalr $[[TGT]]
+
+ call void @extern_void_void()
+ ret i32 0
+}
+
+define i32 @call_i32_void() {
+; ALL-LABEL: call_i32_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_i32_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_i32_void)($gp)
+
+; ALL: jalr $[[TGT]]
+
+ %1 = call i32 @extern_i32_void()
+ %2 = add i32 %1, 1
+ ret i32 %2
+}
+
+define float @call_float_void() {
+; ALL-LABEL: call_float_void:
+
+; FIXME: Not sure why we don't use $gp directly on such a simple test. We should
+; look into it at some point.
+; O32: addu $[[GP:[0-9]+]], ${{[0-9]+}}, $25
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_float_void)($[[GP]])
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_float_void)($gp)
+
+; ALL: jalr $[[TGT]]
+
+; O32: move $gp, $[[GP]]
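+; (Context, as an assumption about the ABI: under O32 PIC a function recomputes
+; $gp from $25 at entry, which is why the call target is loaded through the
+; recomputed register and $gp is restored after the call returns.)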
+
+ %1 = call float @extern_float_void()
+ %2 = fadd float %1, 1.0
+ ret float %2
+}
+
+define void @musttail_call_void_void() {
+; ALL-LABEL: musttail_call_void_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_void_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_void_void)($gp)
+
+; NOT-R6: jr $[[TGT]]
+; R6: r6.jr $[[TGT]]
+
+ musttail call void @extern_void_void()
+ ret void
+}
+
+define i32 @musttail_call_i32_void() {
+; ALL-LABEL: musttail_call_i32_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_i32_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_i32_void)($gp)
+
+; NOT-R6: jr $[[TGT]]
+; R6: r6.jr $[[TGT]]
+
+ %1 = musttail call i32 @extern_i32_void()
+ ret i32 %1
+}
+
+define float @musttail_call_float_void() {
+; ALL-LABEL: musttail_call_float_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_float_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_float_void)($gp)
+
+; NOT-R6: jr $[[TGT]]
+; R6: r6.jr $[[TGT]]
+
+ %1 = musttail call float @extern_float_void()
+ ret float %1
+}
+
+define i32 @indirect_call_void_void(void ()* %addr) {
+; ALL-LABEL: indirect_call_void_void:
+
+; ALL: move $25, $4
+; ALL: jalr $25
+
+ call void %addr()
+ ret i32 0
+}
+
+define i32 @indirect_call_i32_void(i32 ()* %addr) {
+; ALL-LABEL: indirect_call_i32_void:
+
+; ALL: move $25, $4
+; ALL: jalr $25
+
+ %1 = call i32 %addr()
+ %2 = add i32 %1, 1
+ ret i32 %2
+}
+
+define float @indirect_call_float_void(float ()* %addr) {
+; ALL-LABEL: indirect_call_float_void:
+
+; ALL: move $25, $4
+; ALL: jalr $25
+
+ %1 = call float %addr()
+ %2 = fadd float %1, 1.0
+ ret float %2
+}
+
+; We can't use 'musttail' here because the verifier is too conservative and
+; prohibits any prototype difference.
+define void @tail_indirect_call_void_void(void ()* %addr) {
+; ALL-LABEL: tail_indirect_call_void_void:
+
+; ALL: move $25, $4
+; ALL: jr $25
+
+ tail call void %addr()
+ ret void
+}
+
+define i32 @tail_indirect_call_i32_void(i32 ()* %addr) {
+; ALL-LABEL: tail_indirect_call_i32_void:
+
+; ALL: move $25, $4
+; ALL: jr $25
+
+ %1 = tail call i32 %addr()
+ ret i32 %1
+}
+
+define float @tail_indirect_call_float_void(float ()* %addr) {
+; ALL-LABEL: tail_indirect_call_float_void:
+
+; ALL: move $25, $4
+; ALL: jr $25
+
+ %1 = tail call float %addr()
+ ret float %1
+}
diff --git a/test/CodeGen/Mips/llvm-ir/indirectbr.ll b/test/CodeGen/Mips/llvm-ir/indirectbr.ll
new file mode 100644
index 0000000..d8fd787
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/indirectbr.ll
@@ -0,0 +1,34 @@
+; Test all important variants of the 'indirectbr' instruction.
+
+; RUN: llc -march=mips -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=R6
+; RUN: llc -march=mips64 -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=R6
+
+define i32 @br(i8 *%addr) {
+; ALL-LABEL: br:
+; NOT-R6: jr $4 # <MCInst #{{[0-9]+}} JR
+; R6: jr $4 # <MCInst #{{[0-9]+}} JALR
+
+; ALL: $BB0_1: # %L1
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
+; ALL: addiu $2, $zero, 0
+
+; ALL: $BB0_2: # %L2
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
+; ALL: addiu $2, $zero, 1
+
+entry:
+ indirectbr i8* %addr, [label %L1, label %L2]
+
+L1:
+ ret i32 0
+
+L2:
+ ret i32 1
+}
diff --git a/test/CodeGen/Mips/llvm-ir/ret.ll b/test/CodeGen/Mips/llvm-ir/ret.ll
new file mode 100644
index 0000000..8f5b115
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/ret.ll
@@ -0,0 +1,205 @@
+; Test all important variants of the 'ret' instruction.
+;
+; For non-void returns it is necessary to have something to return, so we also
+; test constant generation here.
+;
+; We'll test pointer returns in a separate file since the relocation model
+; affects it and it's undesirable to repeat the non-pointer returns for each
+; relocation model.
+
+; RUN: llc -march=mips -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=NO-MTHC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=R6
+; RUN: llc -march=mips64 -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=R6
+
+define void @ret_void() {
+; ALL-LABEL: ret_void:
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret void
+}
+
+define i8 @ret_i8() {
+; ALL-LABEL: ret_i8:
+; ALL-DAG: addiu $2, $zero, 3
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i8 3
+}
+
+define i16 @ret_i16_3() {
+; ALL-LABEL: ret_i16_3:
+; ALL-DAG: addiu $2, $zero, 3
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i16 3
+}
+
+define i16 @ret_i16_256() {
+; ALL-LABEL: ret_i16_256:
+; ALL-DAG: addiu $2, $zero, 256
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i16 256
+}
+
+define i16 @ret_i16_257() {
+; ALL-LABEL: ret_i16_257:
+; ALL-DAG: addiu $2, $zero, 257
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i16 257
+}
+
+define i32 @ret_i32_257() {
+; ALL-LABEL: ret_i32_257:
+; ALL-DAG: addiu $2, $zero, 257
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i32 257
+}
+
+define i32 @ret_i32_65536() {
+; ALL-LABEL: ret_i32_65536:
+; ALL-DAG: lui $2, 1
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i32 65536
+}
+
+define i32 @ret_i32_65537() {
+; ALL-LABEL: ret_i32_65537:
+; ALL: lui $[[T0:[0-9]+]], 1
+; ALL-DAG: ori $2, $[[T0]], 1
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i32 65537
+}
+
+define i64 @ret_i64_65537() {
+; ALL-LABEL: ret_i64_65537:
+; ALL: lui $[[T0:[0-9]+]], 1
+
+; GPR32-DAG: ori $3, $[[T0]], 1
+; GPR32-DAG: addiu $2, $zero, 0
+
+; GPR64-DAG: daddiu $2, $[[T0]], 1
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i64 65537
+}
+
+define i64 @ret_i64_281479271677952() {
+; ALL-LABEL: ret_i64_281479271677952:
+; ALL-DAG: lui $[[T0:[0-9]+]], 1
+
+; GPR32-DAG: ori $2, $[[T0]], 1
+; GPR32-DAG: addiu $3, $zero, 0
+
+; GPR64-DAG: daddiu $[[T1:[0-9]+]], $[[T0]], 1
+; GPR64-DAG: dsll $2, $[[T1]], 32
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i64 281479271677952
+}
+
+define i64 @ret_i64_281479271809026() {
+; ALL-LABEL: ret_i64_281479271809026:
+; GPR32-DAG: lui $[[T0:[0-9]+]], 1
+; GPR32-DAG: lui $[[T1:[0-9]+]], 2
+; GPR32-DAG: ori $2, $[[T0]], 1
+; GPR32-DAG: ori $3, $[[T1]], 2
+
+; GPR64-DAG: ori $[[T0:[0-9]+]], $zero, 32769
+; GPR64-DAG: dsll $[[T1:[0-9]+]], $[[T0]], 16
+; GPR64-DAG: daddiu $[[T0:[0-9]+]], $[[T0]], -32767
+; GPR64-DAG: dsll $[[T1:[0-9]+]], $[[T0]], 17
+; GPR64-DAG: daddiu $2, $[[T1]], 2
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i64 281479271809026
+}
+
+define float @ret_float_0x0() {
+; ALL-LABEL: ret_float_0x0:
+
+; NO-MTHC1-DAG: mtc1 $zero, $f0
+
+; MTHC1-DAG: mtc1 $zero, $f0
+
+; DMTC-DAG: dmtc1 $zero, $f0
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret float 0x0000000000000000
+}
+
+define float @ret_float_0x3() {
+; ALL-LABEL: ret_float_0x3:
+
+; Use a constant pool
+; O32-DAG: lwc1 $f0, %lo($CPI
+; N64-DAG: lwc1 $f0, %got_ofst($CPI
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+; In LLVM IR, float constants are written in double-precision hexadecimal form.
+ ret float 0x36b8000000000000
+}
+
+define double @ret_double_0x0() {
+; ALL-LABEL: ret_double_0x0:
+
+; NO-MTHC1-DAG: mtc1 $zero, $f0
+; NO-MTHC1-DAG: mtc1 $zero, $f1
+
+; MTHC1-DAG: mtc1 $zero, $f0
+; MTHC1-DAG: mthc1 $zero, $f0
+
+; DMTC-DAG: dmtc1 $zero, $f0
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret double 0x0000000000000000
+}
+
+define double @ret_double_0x3() {
+; ALL-LABEL: ret_double_0x3:
+
+; Use a constant pool
+; O32-DAG: ldc1 $f0, %lo($CPI
+; N64-DAG: ldc1 $f0, %got_ofst($CPI
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret double 0x0000000000000003
+}
diff --git a/test/CodeGen/Mips/longbranch.ll b/test/CodeGen/Mips/longbranch.ll
index c7fe6fd..a403744 100644
--- a/test/CodeGen/Mips/longbranch.ll
+++ b/test/CodeGen/Mips/longbranch.ll
@@ -7,6 +7,8 @@
; RUN: < %s | FileCheck %s -check-prefix=N64
; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=micromips \
; RUN: -force-mips-long-branch -O3 < %s | FileCheck %s -check-prefix=MICROMIPS
+; RUN: llc -mtriple=mipsel-none-nacl -force-mips-long-branch -O3 < %s \
+; RUN: | FileCheck %s -check-prefix=NACL
@x = external global i32
@@ -126,4 +128,36 @@ end:
; MICROMIPS: $[[BB2]]:
; MICROMIPS: jr $ra
; MICROMIPS: nop
+
+
+; Check the NaCl version. Check that the sp change is not in the branch delay
+; slot of the "jr $1" instruction. Check that the target of the indirect
+; branch "jr $1" is bundle-aligned.
+
+; NACL: lui $[[R0:[0-9]+]], %hi(_gp_disp)
+; NACL: addiu $[[R0]], $[[R0]], %lo(_gp_disp)
+; NACL: bnez $4, $[[BB0:BB[0-9_]+]]
+; NACL: addu $[[GP:[0-9]+]], $[[R0]], $25
+
+; Check for long branch expansion:
+; NACL: addiu $sp, $sp, -8
+; NACL-NEXT: sw $ra, 0($sp)
+; NACL-NEXT: lui $1, %hi(($[[BB2:BB[0-9_]+]])-($[[BB1:BB[0-9_]+]]))
+; NACL-NEXT: bal $[[BB1]]
+; NACL-NEXT: addiu $1, $1, %lo(($[[BB2]])-($[[BB1]]))
+; NACL-NEXT: $[[BB1]]:
+; NACL-NEXT: addu $1, $ra, $1
+; NACL-NEXT: lw $ra, 0($sp)
+; NACL-NEXT: addiu $sp, $sp, 8
+; NACL-NEXT: jr $1
+; NACL-NEXT: nop
+
+; NACL: $[[BB0]]:
+; NACL: lw $[[R1:[0-9]+]], %got(x)($[[GP]])
+; NACL: addiu $[[R2:[0-9]+]], $zero, 1
+; NACL: sw $[[R2]], 0($[[R1]])
+; NACL: .align 4
+; NACL-NEXT: $[[BB2]]:
+; NACL: jr $ra
+; NACL: nop
}
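A reader's sketch (labels hypothetical) of how the expansion checked above computes the target: `bal` deposits the address of the next label in $ra, and the %hi/%lo pair encodes the distance from that label to the real destination, so no absolute address is needed.

;   lui   $1, %hi(BB2-BB1)       ; upper half of the distance to the target
;   bal   BB1                    ; $ra <- address of BB1 (the next label)
;   addiu $1, $1, %lo(BB2-BB1)   ; (delay slot) lower half of the distance
; BB1:
;   addu  $1, $ra, $1            ; $1 = BB1 + (BB2 - BB1) = BB2
;   jr    $1                     ; $ra was saved/restored around the bal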
diff --git a/test/CodeGen/Mips/madd-msub.ll b/test/CodeGen/Mips/madd-msub.ll
index 0dbb2c2..8222967 100644
--- a/test/CodeGen/Mips/madd-msub.ll
+++ b/test/CodeGen/Mips/madd-msub.ll
@@ -1,9 +1,49 @@
-; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=32
-; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s -check-prefix=DSP
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32R6
+; RUN: llc -march=mips -mcpu=mips32 -mattr=dsp < %s | FileCheck %s -check-prefix=DSP
+; RUN: llc -march=mips -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64
+; RUN: llc -march=mips -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64
+; RUN: llc -march=mips -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64R6
+
+; FIXME: The MIPS16 test should check its output
; RUN: llc -march=mips -mcpu=mips16 < %s
-; 32: madd ${{[0-9]+}}
-; DSP: madd $ac
+; ALL-LABEL: madd1:
+
+; 32-DAG: sra $[[T0:[0-9]+]], $6, 31
+; 32-DAG: mtlo $6
+; 32-DAG: [[m:m]]add ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: sra $[[T0:[0-9]+]], $6, 31
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: madd $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
+; 32R6-DAG: sra $[[T3:[0-9]+]], $6, 31
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
+
+; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64-DAG: d[[m:m]]ult $[[T1]], $[[T0]]
+; 64-DAG: [[m]]flo $[[T2:[0-9]+]]
+; 64-DAG: sll $[[T3:[0-9]+]], $6, 0
+; 64-DAG: daddu $2, $[[T2]], $[[T3]]
+
+; 64R6-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64R6-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64R6-DAG: dmul $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 64R6-DAG: sll $[[T3:[0-9]+]], $6, 0
+; 64R6-DAG: daddu $2, $[[T2]], $[[T3]]
+
define i64 @madd1(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
@@ -14,8 +54,47 @@ entry:
ret i64 %add
}
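A reader's annotation of the 32R6 checks above (names hypothetical): with no HI/LO accumulator, the 64-bit multiply-add is rebuilt from 32-bit halves with an explicit carry.

;   mul   lo, a, b         ; low 32 bits of a*b
;   addu  sum, lo, c       ; low word of the result
;   sltu  carry, sum, c    ; carry out of the low-word add
;   sra   sign, c, 31      ; high word of sext(c)
;   addu  t, carry, sign
;   muh   hi, a, b         ; high 32 bits of a*b
;   addu  hi2, hi, t       ; high word of the result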
-; 32: maddu ${{[0-9]+}}
-; DSP: maddu $ac
+; ALL-LABEL: madd2:
+
+; FIXME: We don't really need this instruction
+; 32-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; 32-DAG: mtlo $6
+; 32-DAG: [[m:m]]addu ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: maddu $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
+; FIXME: There's a redundant move here. We should remove it
+; 32R6-DAG: muhu $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T3]], $[[T2]]
+
+; 64-DAG: dsll $[[T0:[0-9]+]], $4, 32
+; 64-DAG: dsrl $[[T1:[0-9]+]], $[[T0]], 32
+; 64-DAG: dsll $[[T2:[0-9]+]], $5, 32
+; 64-DAG: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; 64-DAG: d[[m:m]]ult $[[T3]], $[[T1]]
+; 64-DAG: [[m]]flo $[[T4:[0-9]+]]
+; 64-DAG: dsll $[[T5:[0-9]+]], $6, 32
+; 64-DAG: dsrl $[[T6:[0-9]+]], $[[T5]], 32
+; 64-DAG: daddu $2, $[[T4]], $[[T6]]
+
+; 64R6-DAG: dsll $[[T0:[0-9]+]], $4, 32
+; 64R6-DAG: dsrl $[[T1:[0-9]+]], $[[T0]], 32
+; 64R6-DAG: dsll $[[T2:[0-9]+]], $5, 32
+; 64R6-DAG: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; 64R6-DAG: dmul $[[T4:[0-9]+]], $[[T3]], $[[T1]]
+; 64R6-DAG: dsll $[[T5:[0-9]+]], $6, 32
+; 64R6-DAG: dsrl $[[T6:[0-9]+]], $[[T5]], 32
+; 64R6-DAG: daddu $2, $[[T4]], $[[T6]]
+
define i64 @madd2(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = zext i32 %a to i64
@@ -26,8 +105,38 @@ entry:
ret i64 %add
}
-; 32: madd ${{[0-9]+}}
-; DSP: madd $ac
+; ALL-LABEL: madd3:
+
+; 32-DAG: mthi $6
+; 32-DAG: mtlo $7
+; 32-DAG: [[m:m]]add ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: mthi $[[AC:ac[0-3]+]], $6
+; DSP-DAG: mtlo $[[AC]], $7
+; DSP-DAG: madd $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $7
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $7
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $6
+; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
+
+; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64-DAG: d[[m:m]]ult $[[T1]], $[[T0]]
+; 64-DAG: [[m]]flo $[[T2:[0-9]+]]
+; 64-DAG: daddu $2, $[[T2]], $6
+
+; 64R6-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64R6-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64R6-DAG: dmul $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 64R6-DAG: daddu $2, $[[T2]], $6
+
define i64 @madd3(i32 %a, i32 %b, i64 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
@@ -37,8 +146,41 @@ entry:
ret i64 %add
}
-; 32: msub ${{[0-9]+}}
-; DSP: msub $ac
+; ALL-LABEL: msub1:
+
+; 32-DAG: sra $[[T0:[0-9]+]], $6, 31
+; 32-DAG: mtlo $6
+; 32-DAG: [[m:m]]sub ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: sra $[[T0:[0-9]+]], $6, 31
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: msub $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T3:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T3]], $[[T0]]
+; 32R6-DAG: sra $[[T5:[0-9]+]], $6, 31
+; 32R6-DAG: subu $2, $[[T5]], $[[T4]]
+; 32R6-DAG: subu $3, $6, $[[T1]]
+
+; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64-DAG: d[[m:m]]ult $[[T1]], $[[T0]]
+; 64-DAG: [[m]]flo $[[T2:[0-9]+]]
+; 64-DAG: sll $[[T3:[0-9]+]], $6, 0
+; 64-DAG: dsubu $2, $[[T3]], $[[T2]]
+
+; 64R6-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64R6-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64R6-DAG: dmul $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 64R6-DAG: sll $[[T3:[0-9]+]], $6, 0
+; 64R6-DAG: dsubu $2, $[[T3]], $[[T2]]
+
define i64 @msub1(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = sext i32 %c to i64
@@ -49,8 +191,48 @@ entry:
ret i64 %sub
}
-; 32: msubu ${{[0-9]+}}
-; DSP: msubu $ac
+; ALL-LABEL: msub2:
+
+; FIXME: We don't really need this instruction
+; 32-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; 32-DAG: mtlo $6
+; 32-DAG: [[m:m]]subu ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: msubu $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: muhu $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG: negu $2, $[[T3]]
+; 32R6-DAG: subu $3, $6, $[[T1]]
+
+; 64-DAG: dsll $[[T0:[0-9]+]], $4, 32
+; 64-DAG: dsrl $[[T1:[0-9]+]], $[[T0]], 32
+; 64-DAG: dsll $[[T2:[0-9]+]], $5, 32
+; 64-DAG: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; 64-DAG: d[[m:m]]ult $[[T3]], $[[T1]]
+; 64-DAG: [[m]]flo $[[T4:[0-9]+]]
+; 64-DAG: dsll $[[T5:[0-9]+]], $6, 32
+; 64-DAG: dsrl $[[T6:[0-9]+]], $[[T5]], 32
+; 64-DAG: dsubu $2, $[[T6]], $[[T4]]
+
+; 64R6-DAG: dsll $[[T0:[0-9]+]], $4, 32
+; 64R6-DAG: dsrl $[[T1:[0-9]+]], $[[T0]], 32
+; 64R6-DAG: dsll $[[T2:[0-9]+]], $5, 32
+; 64R6-DAG: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; 64R6-DAG: dmul $[[T4:[0-9]+]], $[[T3]], $[[T1]]
+; 64R6-DAG: dsll $[[T5:[0-9]+]], $6, 32
+; 64R6-DAG: dsrl $[[T6:[0-9]+]], $[[T5]], 32
+; 64R6-DAG: dsubu $2, $[[T6]], $[[T4]]
+
define i64 @msub2(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = zext i32 %c to i64
@@ -61,8 +243,39 @@ entry:
ret i64 %sub
}
-; 32: msub ${{[0-9]+}}
-; DSP: msub $ac
+; ALL-LABEL: msub3:
+
+; FIXME: We don't really need this instruction
+; 32-DAG: mthi $6
+; 32-DAG: mtlo $7
+; 32-DAG: [[m:m]]sub ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: msub $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $7, $[[T1]]
+; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG: subu $2, $6, $[[T3]]
+; 32R6-DAG: subu $3, $7, $[[T1]]
+
+; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64-DAG: d[[m:m]]ult $[[T1]], $[[T0]]
+; 64-DAG: [[m]]flo $[[T2:[0-9]+]]
+; 64-DAG: dsubu $2, $6, $[[T2]]
+
+; 64R6-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64R6-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64R6-DAG: dmul $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 64R6-DAG: dsubu $2, $6, $[[T2]]
+
define i64 @msub3(i32 %a, i32 %b, i64 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
diff --git a/test/CodeGen/Mips/mips16ex.ll b/test/CodeGen/Mips/mips16ex.ll
index ecb30b5..a1a9919 100644
--- a/test/CodeGen/Mips/mips16ex.ll
+++ b/test/CodeGen/Mips/mips16ex.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
-;16: $eh_func_begin0=.
+;16: .cfi_personality
+;16-NEXT: [[TMP:.*]]:
+;16-NEXT: $eh_func_begin0 = ([[TMP]])
@.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
@_ZTIi = external constant i8*
@.str1 = private unnamed_addr constant [15 x i8] c"exception %i \0A\00", align 1
diff --git a/test/CodeGen/Mips/mips64-f128.ll b/test/CodeGen/Mips/mips64-f128.ll
index 4d590b6..7f7d515 100644
--- a/test/CodeGen/Mips/mips64-f128.ll
+++ b/test/CodeGen/Mips/mips64-f128.ll
@@ -1,7 +1,11 @@
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips4 -soft-float -O1 \
-; RUN: -disable-mips-delay-filler < %s | FileCheck %s
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64 -soft-float -O1 \
-; RUN: -disable-mips-delay-filler < %s | FileCheck %s
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r2 -soft-float -O1 \
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r6 -soft-float -O1 \
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=CMP_CC_FMT
@gld0 = external global fp128
@gld1 = external global fp128
@@ -9,8 +13,8 @@
@gf1 = external global float
@gd1 = external global double
-; CHECK-LABEL: addLD:
-; CHECK: ld $25, %call16(__addtf3)
+; ALL-LABEL: addLD:
+; ALL: ld $25, %call16(__addtf3)
define fp128 @addLD() {
entry:
@@ -20,8 +24,8 @@ entry:
ret fp128 %add
}
-; CHECK-LABEL: subLD:
-; CHECK: ld $25, %call16(__subtf3)
+; ALL-LABEL: subLD:
+; ALL: ld $25, %call16(__subtf3)
define fp128 @subLD() {
entry:
@@ -31,8 +35,8 @@ entry:
ret fp128 %sub
}
-; CHECK-LABEL: mulLD:
-; CHECK: ld $25, %call16(__multf3)
+; ALL-LABEL: mulLD:
+; ALL: ld $25, %call16(__multf3)
define fp128 @mulLD() {
entry:
@@ -42,8 +46,8 @@ entry:
ret fp128 %mul
}
-; CHECK-LABEL: divLD:
-; CHECK: ld $25, %call16(__divtf3)
+; ALL-LABEL: divLD:
+; ALL: ld $25, %call16(__divtf3)
define fp128 @divLD() {
entry:
@@ -53,8 +57,8 @@ entry:
ret fp128 %div
}
-; CHECK-LABEL: conv_LD_char:
-; CHECK: ld $25, %call16(__floatsitf)
+; ALL-LABEL: conv_LD_char:
+; ALL: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_char(i8 signext %a) {
entry:
@@ -62,8 +66,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_short:
-; CHECK: ld $25, %call16(__floatsitf)
+; ALL-LABEL: conv_LD_short:
+; ALL: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_short(i16 signext %a) {
entry:
@@ -71,8 +75,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_int:
-; CHECK: ld $25, %call16(__floatsitf)
+; ALL-LABEL: conv_LD_int:
+; ALL: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_int(i32 %a) {
entry:
@@ -80,8 +84,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_LL:
-; CHECK: ld $25, %call16(__floatditf)
+; ALL-LABEL: conv_LD_LL:
+; ALL: ld $25, %call16(__floatditf)
define fp128 @conv_LD_LL(i64 %a) {
entry:
@@ -89,8 +93,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_UChar:
-; CHECK: ld $25, %call16(__floatunsitf)
+; ALL-LABEL: conv_LD_UChar:
+; ALL: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UChar(i8 zeroext %a) {
entry:
@@ -98,8 +102,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_UShort:
-; CHECK: ld $25, %call16(__floatunsitf)
+; ALL-LABEL: conv_LD_UShort:
+; ALL: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UShort(i16 zeroext %a) {
entry:
@@ -107,8 +111,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_UInt:
-; CHECK: ld $25, %call16(__floatunsitf)
+; ALL-LABEL: conv_LD_UInt:
+; ALL: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UInt(i32 %a) {
entry:
@@ -116,8 +120,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_ULL:
-; CHECK: ld $25, %call16(__floatunditf)
+; ALL-LABEL: conv_LD_ULL:
+; ALL: ld $25, %call16(__floatunditf)
define fp128 @conv_LD_ULL(i64 %a) {
entry:
@@ -125,8 +129,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_char_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_char_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define signext i8 @conv_char_LD(fp128 %a) {
entry:
@@ -134,8 +138,8 @@ entry:
ret i8 %conv
}
-; CHECK-LABEL: conv_short_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_short_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define signext i16 @conv_short_LD(fp128 %a) {
entry:
@@ -143,8 +147,8 @@ entry:
ret i16 %conv
}
-; CHECK-LABEL: conv_int_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_int_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define i32 @conv_int_LD(fp128 %a) {
entry:
@@ -152,8 +156,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: conv_LL_LD:
-; CHECK: ld $25, %call16(__fixtfdi)
+; ALL-LABEL: conv_LL_LD:
+; ALL: ld $25, %call16(__fixtfdi)
define i64 @conv_LL_LD(fp128 %a) {
entry:
@@ -161,8 +165,8 @@ entry:
ret i64 %conv
}
-; CHECK-LABEL: conv_UChar_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_UChar_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define zeroext i8 @conv_UChar_LD(fp128 %a) {
entry:
@@ -170,8 +174,8 @@ entry:
ret i8 %conv
}
-; CHECK-LABEL: conv_UShort_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_UShort_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define zeroext i16 @conv_UShort_LD(fp128 %a) {
entry:
@@ -179,8 +183,8 @@ entry:
ret i16 %conv
}
-; CHECK-LABEL: conv_UInt_LD:
-; CHECK: ld $25, %call16(__fixunstfsi)
+; ALL-LABEL: conv_UInt_LD:
+; ALL: ld $25, %call16(__fixunstfsi)
define i32 @conv_UInt_LD(fp128 %a) {
entry:
@@ -188,8 +192,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: conv_ULL_LD:
-; CHECK: ld $25, %call16(__fixunstfdi)
+; ALL-LABEL: conv_ULL_LD:
+; ALL: ld $25, %call16(__fixunstfdi)
define i64 @conv_ULL_LD(fp128 %a) {
entry:
@@ -197,8 +201,8 @@ entry:
ret i64 %conv
}
-; CHECK-LABEL: conv_LD_float:
-; CHECK: ld $25, %call16(__extendsftf2)
+; ALL-LABEL: conv_LD_float:
+; ALL: ld $25, %call16(__extendsftf2)
define fp128 @conv_LD_float(float %a) {
entry:
@@ -206,8 +210,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_double:
-; CHECK: ld $25, %call16(__extenddftf2)
+; ALL-LABEL: conv_LD_double:
+; ALL: ld $25, %call16(__extenddftf2)
define fp128 @conv_LD_double(double %a) {
entry:
@@ -215,8 +219,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_float_LD:
-; CHECK: ld $25, %call16(__trunctfsf2)
+; ALL-LABEL: conv_float_LD:
+; ALL: ld $25, %call16(__trunctfsf2)
define float @conv_float_LD(fp128 %a) {
entry:
@@ -224,8 +228,8 @@ entry:
ret float %conv
}
-; CHECK-LABEL: conv_double_LD:
-; CHECK: ld $25, %call16(__trunctfdf2)
+; ALL-LABEL: conv_double_LD:
+; ALL: ld $25, %call16(__trunctfdf2)
define double @conv_double_LD(fp128 %a) {
entry:
@@ -233,13 +237,13 @@ entry:
ret double %conv
}
-; CHECK-LABEL: libcall1_fabsl:
-; CHECK-DAG: ld $[[R0:[0-9]+]], 8($[[R4:[0-9]+]])
-; CHECK-DAG: daddiu $[[R1:[0-9]+]], $zero, 1
-; CHECK-DAG: dsll $[[R2:[0-9]+]], $[[R1]], 63
-; CHECK-DAG: daddiu $[[R3:[0-9]+]], $[[R2]], -1
-; CHECK-DAG: and $4, $[[R0]], $[[R3]]
-; CHECK-DAG: ld $2, 0($[[R4]])
+; ALL-LABEL: libcall1_fabsl:
+; ALL-DAG: ld $[[R0:[0-9]+]], 8($[[R4:[0-9]+]])
+; ALL-DAG: daddiu $[[R1:[0-9]+]], $zero, 1
+; ALL-DAG: dsll $[[R2:[0-9]+]], $[[R1]], 63
+; ALL-DAG: daddiu $[[R3:[0-9]+]], $[[R2]], -1
+; ALL-DAG: and $4, $[[R0]], $[[R3]]
+; ALL-DAG: ld $2, 0($[[R4]])
define fp128 @libcall1_fabsl() {
entry:
@@ -250,8 +254,8 @@ entry:
declare fp128 @fabsl(fp128) #1
-; CHECK-LABEL: libcall1_ceill:
-; CHECK: ld $25, %call16(ceill)
+; ALL-LABEL: libcall1_ceill:
+; ALL: ld $25, %call16(ceill)
define fp128 @libcall1_ceill() {
entry:
@@ -262,8 +266,8 @@ entry:
declare fp128 @ceill(fp128) #1
-; CHECK-LABEL: libcall1_sinl:
-; CHECK: ld $25, %call16(sinl)
+; ALL-LABEL: libcall1_sinl:
+; ALL: ld $25, %call16(sinl)
define fp128 @libcall1_sinl() {
entry:
@@ -274,8 +278,8 @@ entry:
declare fp128 @sinl(fp128) #2
-; CHECK-LABEL: libcall1_cosl:
-; CHECK: ld $25, %call16(cosl)
+; ALL-LABEL: libcall1_cosl:
+; ALL: ld $25, %call16(cosl)
define fp128 @libcall1_cosl() {
entry:
@@ -286,8 +290,8 @@ entry:
declare fp128 @cosl(fp128) #2
-; CHECK-LABEL: libcall1_expl:
-; CHECK: ld $25, %call16(expl)
+; ALL-LABEL: libcall1_expl:
+; ALL: ld $25, %call16(expl)
define fp128 @libcall1_expl() {
entry:
@@ -298,8 +302,8 @@ entry:
declare fp128 @expl(fp128) #2
-; CHECK-LABEL: libcall1_exp2l:
-; CHECK: ld $25, %call16(exp2l)
+; ALL-LABEL: libcall1_exp2l:
+; ALL: ld $25, %call16(exp2l)
define fp128 @libcall1_exp2l() {
entry:
@@ -310,8 +314,8 @@ entry:
declare fp128 @exp2l(fp128) #2
-; CHECK-LABEL: libcall1_logl:
-; CHECK: ld $25, %call16(logl)
+; ALL-LABEL: libcall1_logl:
+; ALL: ld $25, %call16(logl)
define fp128 @libcall1_logl() {
entry:
@@ -322,8 +326,8 @@ entry:
declare fp128 @logl(fp128) #2
-; CHECK-LABEL: libcall1_log2l:
-; CHECK: ld $25, %call16(log2l)
+; ALL-LABEL: libcall1_log2l:
+; ALL: ld $25, %call16(log2l)
define fp128 @libcall1_log2l() {
entry:
@@ -334,8 +338,8 @@ entry:
declare fp128 @log2l(fp128) #2
-; CHECK-LABEL: libcall1_log10l:
-; CHECK: ld $25, %call16(log10l)
+; ALL-LABEL: libcall1_log10l:
+; ALL: ld $25, %call16(log10l)
define fp128 @libcall1_log10l() {
entry:
@@ -346,8 +350,8 @@ entry:
declare fp128 @log10l(fp128) #2
-; CHECK-LABEL: libcall1_nearbyintl:
-; CHECK: ld $25, %call16(nearbyintl)
+; ALL-LABEL: libcall1_nearbyintl:
+; ALL: ld $25, %call16(nearbyintl)
define fp128 @libcall1_nearbyintl() {
entry:
@@ -358,8 +362,8 @@ entry:
declare fp128 @nearbyintl(fp128) #1
-; CHECK-LABEL: libcall1_floorl:
-; CHECK: ld $25, %call16(floorl)
+; ALL-LABEL: libcall1_floorl:
+; ALL: ld $25, %call16(floorl)
define fp128 @libcall1_floorl() {
entry:
@@ -370,8 +374,8 @@ entry:
declare fp128 @floorl(fp128) #1
-; CHECK-LABEL: libcall1_sqrtl:
-; CHECK: ld $25, %call16(sqrtl)
+; ALL-LABEL: libcall1_sqrtl:
+; ALL: ld $25, %call16(sqrtl)
define fp128 @libcall1_sqrtl() {
entry:
@@ -382,8 +386,8 @@ entry:
declare fp128 @sqrtl(fp128) #2
-; CHECK-LABEL: libcall1_rintl:
-; CHECK: ld $25, %call16(rintl)
+; ALL-LABEL: libcall1_rintl:
+; ALL: ld $25, %call16(rintl)
define fp128 @libcall1_rintl() {
entry:
@@ -394,8 +398,8 @@ entry:
declare fp128 @rintl(fp128) #1
-; CHECK-LABEL: libcall_powil:
-; CHECK: ld $25, %call16(__powitf2)
+; ALL-LABEL: libcall_powil:
+; ALL: ld $25, %call16(__powitf2)
define fp128 @libcall_powil(fp128 %a, i32 %b) {
entry:
@@ -405,18 +409,18 @@ entry:
declare fp128 @llvm.powi.f128(fp128, i32) #3
-; CHECK-LABEL: libcall2_copysignl:
-; CHECK-DAG: daddiu $[[R2:[0-9]+]], $zero, 1
-; CHECK-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63
-; CHECK-DAG: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK-DAG: ld $[[R1:[0-9]+]], 8($[[R0]])
-; CHECK-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]]
-; CHECK-DAG: ld $[[R5:[0-9]+]], %got_disp(gld0)
-; CHECK-DAG: ld $[[R6:[0-9]+]], 8($[[R5]])
-; CHECK-DAG: daddiu $[[R7:[0-9]+]], $[[R3]], -1
-; CHECK-DAG: and $[[R8:[0-9]+]], $[[R6]], $[[R7]]
-; CHECK-DAG: or $4, $[[R8]], $[[R4]]
-; CHECK-DAG: ld $2, 0($[[R5]])
+; ALL-LABEL: libcall2_copysignl:
+; ALL-DAG: daddiu $[[R2:[0-9]+]], $zero, 1
+; ALL-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63
+; ALL-DAG: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL-DAG: ld $[[R1:[0-9]+]], 8($[[R0]])
+; ALL-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]]
+; ALL-DAG: ld $[[R5:[0-9]+]], %got_disp(gld0)
+; ALL-DAG: ld $[[R6:[0-9]+]], 8($[[R5]])
+; ALL-DAG: daddiu $[[R7:[0-9]+]], $[[R3]], -1
+; ALL-DAG: and $[[R8:[0-9]+]], $[[R6]], $[[R7]]
+; ALL-DAG: or $4, $[[R8]], $[[R4]]
+; ALL-DAG: ld $2, 0($[[R5]])
define fp128 @libcall2_copysignl() {
entry:
@@ -428,8 +432,8 @@ entry:
declare fp128 @copysignl(fp128, fp128) #1
-; CHECK-LABEL: libcall2_powl:
-; CHECK: ld $25, %call16(powl)
+; ALL-LABEL: libcall2_powl:
+; ALL: ld $25, %call16(powl)
define fp128 @libcall2_powl() {
entry:
@@ -441,8 +445,8 @@ entry:
declare fp128 @powl(fp128, fp128) #2
-; CHECK-LABEL: libcall2_fmodl:
-; CHECK: ld $25, %call16(fmodl)
+; ALL-LABEL: libcall2_fmodl:
+; ALL: ld $25, %call16(fmodl)
define fp128 @libcall2_fmodl() {
entry:
@@ -454,8 +458,8 @@ entry:
declare fp128 @fmodl(fp128, fp128) #2
-; CHECK-LABEL: libcall3_fmal:
-; CHECK: ld $25, %call16(fmal)
+; ALL-LABEL: libcall3_fmal:
+; ALL: ld $25, %call16(fmal)
define fp128 @libcall3_fmal() {
entry:
@@ -468,8 +472,8 @@ entry:
declare fp128 @llvm.fma.f128(fp128, fp128, fp128) #4
-; CHECK-LABEL: cmp_lt:
-; CHECK: ld $25, %call16(__lttf2)
+; ALL-LABEL: cmp_lt:
+; ALL: ld $25, %call16(__lttf2)
define i32 @cmp_lt(fp128 %a, fp128 %b) {
entry:
@@ -478,8 +482,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_le:
-; CHECK: ld $25, %call16(__letf2)
+; ALL-LABEL: cmp_le:
+; ALL: ld $25, %call16(__letf2)
define i32 @cmp_le(fp128 %a, fp128 %b) {
entry:
@@ -488,8 +492,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_gt:
-; CHECK: ld $25, %call16(__gttf2)
+; ALL-LABEL: cmp_gt:
+; ALL: ld $25, %call16(__gttf2)
define i32 @cmp_gt(fp128 %a, fp128 %b) {
entry:
@@ -498,8 +502,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_ge:
-; CHECK: ld $25, %call16(__getf2)
+; ALL-LABEL: cmp_ge:
+; ALL: ld $25, %call16(__getf2)
define i32 @cmp_ge(fp128 %a, fp128 %b) {
entry:
@@ -508,8 +512,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_eq:
-; CHECK: ld $25, %call16(__eqtf2)
+; ALL-LABEL: cmp_eq:
+; ALL: ld $25, %call16(__eqtf2)
define i32 @cmp_eq(fp128 %a, fp128 %b) {
entry:
@@ -518,8 +522,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_ne:
-; CHECK: ld $25, %call16(__netf2)
+; ALL-LABEL: cmp_ne:
+; ALL: ld $25, %call16(__netf2)
define i32 @cmp_ne(fp128 %a, fp128 %b) {
entry:
@@ -528,10 +532,10 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: load_LD_LD:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $2, 0($[[R0]])
-; CHECK: ld $4, 8($[[R0]])
+; ALL-LABEL: load_LD_LD:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL: ld $2, 0($[[R0]])
+; ALL: ld $4, 8($[[R0]])
define fp128 @load_LD_LD() {
entry:
@@ -539,11 +543,11 @@ entry:
ret fp128 %0
}
-; CHECK-LABEL: load_LD_float:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gf1)
-; CHECK: lw $4, 0($[[R0]])
-; CHECK: ld $25, %call16(__extendsftf2)
-; CHECK: jalr $25
+; ALL-LABEL: load_LD_float:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gf1)
+; ALL: lw $4, 0($[[R0]])
+; ALL: ld $25, %call16(__extendsftf2)
+; ALL: jalr $25
define fp128 @load_LD_float() {
entry:
@@ -552,11 +556,11 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: load_LD_double:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gd1)
-; CHECK: ld $4, 0($[[R0]])
-; CHECK: ld $25, %call16(__extenddftf2)
-; CHECK: jalr $25
+; ALL-LABEL: load_LD_double:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gd1)
+; ALL: ld $4, 0($[[R0]])
+; ALL: ld $25, %call16(__extenddftf2)
+; ALL: jalr $25
define fp128 @load_LD_double() {
entry:
@@ -565,13 +569,13 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: store_LD_LD:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK: ld $[[R2:[0-9]+]], 8($[[R0]])
-; CHECK: ld $[[R3:[0-9]+]], %got_disp(gld0)
-; CHECK: sd $[[R2]], 8($[[R3]])
-; CHECK: sd $[[R1]], 0($[[R3]])
+; ALL-LABEL: store_LD_LD:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL: ld $[[R1:[0-9]+]], 0($[[R0]])
+; ALL: ld $[[R2:[0-9]+]], 8($[[R0]])
+; ALL: ld $[[R3:[0-9]+]], %got_disp(gld0)
+; ALL: sd $[[R2]], 8($[[R3]])
+; ALL: sd $[[R1]], 0($[[R3]])
define void @store_LD_LD() {
entry:
@@ -580,14 +584,14 @@ entry:
ret void
}
-; CHECK-LABEL: store_LD_float:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $4, 0($[[R0]])
-; CHECK: ld $5, 8($[[R0]])
-; CHECK: ld $25, %call16(__trunctfsf2)
-; CHECK: jalr $25
-; CHECK: ld $[[R1:[0-9]+]], %got_disp(gf1)
-; CHECK: sw $2, 0($[[R1]])
+; ALL-LABEL: store_LD_float:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL: ld $4, 0($[[R0]])
+; ALL: ld $5, 8($[[R0]])
+; ALL: ld $25, %call16(__trunctfsf2)
+; ALL: jalr $25
+; ALL: ld $[[R1:[0-9]+]], %got_disp(gf1)
+; ALL: sw $2, 0($[[R1]])
define void @store_LD_float() {
entry:
@@ -597,14 +601,14 @@ entry:
ret void
}
-; CHECK-LABEL: store_LD_double:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $4, 0($[[R0]])
-; CHECK: ld $5, 8($[[R0]])
-; CHECK: ld $25, %call16(__trunctfdf2)
-; CHECK: jalr $25
-; CHECK: ld $[[R1:[0-9]+]], %got_disp(gd1)
-; CHECK: sd $2, 0($[[R1]])
+; ALL-LABEL: store_LD_double:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL: ld $4, 0($[[R0]])
+; ALL: ld $5, 8($[[R0]])
+; ALL: ld $25, %call16(__trunctfdf2)
+; ALL: jalr $25
+; ALL: ld $[[R1:[0-9]+]], %got_disp(gd1)
+; ALL: sd $2, 0($[[R1]])
define void @store_LD_double() {
entry:
@@ -614,11 +618,22 @@ entry:
ret void
}
-; CHECK-LABEL: select_LD:
-; CHECK: movn $8, $6, $4
-; CHECK: movn $9, $7, $4
-; CHECK: move $2, $8
-; CHECK: move $4, $9
+; ALL-LABEL: select_LD:
+; C_CC_FMT: movn $8, $6, $4
+; C_CC_FMT: movn $9, $7, $4
+; C_CC_FMT: move $2, $8
+; C_CC_FMT: move $4, $9
+
+; FIXME: This sll works around an implementation detail in the code generator
+; (setcc's result is i32 so bits 32-63 are undefined). It's not really
+; needed.
+; CMP_CC_FMT-DAG: sll $[[CC:[0-9]+]], $4, 0
+; CMP_CC_FMT-DAG: seleqz $[[EQ1:[0-9]+]], $8, $[[CC]]
+; CMP_CC_FMT-DAG: selnez $[[NE1:[0-9]+]], $6, $[[CC]]
+; CMP_CC_FMT-DAG: or $2, $[[NE1]], $[[EQ1]]
+; CMP_CC_FMT-DAG: seleqz $[[EQ2:[0-9]+]], $9, $[[CC]]
+; CMP_CC_FMT-DAG: selnez $[[NE2:[0-9]+]], $7, $[[CC]]
+; CMP_CC_FMT-DAG: or $4, $[[NE2]], $[[EQ2]]
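A minimal sketch (operands hypothetical) of the branchless select idiom these CMP_CC_FMT checks expect on MIPS32r6/MIPS64r6:

;   seleqz $t0, $b, $cc    ; t0 = (cc == 0) ? b : 0
;   selnez $t1, $a, $cc    ; t1 = (cc != 0) ? a : 0
;   or     $d,  $t1, $t0   ; d  = cc ? a : b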
define fp128 @select_LD(i32 %a, i64, fp128 %b, fp128 %c) {
entry:
@@ -627,18 +642,27 @@ entry:
ret fp128 %cond
}
-; CHECK-LABEL: selectCC_LD:
-; CHECK: move $[[R0:[0-9]+]], $11
-; CHECK: move $[[R1:[0-9]+]], $10
-; CHECK: move $[[R2:[0-9]+]], $9
-; CHECK: move $[[R3:[0-9]+]], $8
-; CHECK: ld $25, %call16(__gttf2)($gp)
-; CHECK: jalr $25
-; CHECK: slti $1, $2, 1
-; CHECK: movz $[[R1]], $[[R3]], $1
-; CHECK: movz $[[R0]], $[[R2]], $1
-; CHECK: move $2, $[[R1]]
-; CHECK: move $4, $[[R0]]
+; ALL-LABEL: selectCC_LD:
+; ALL: move $[[R0:[0-9]+]], $11
+; ALL: move $[[R1:[0-9]+]], $10
+; ALL: move $[[R2:[0-9]+]], $9
+; ALL: move $[[R3:[0-9]+]], $8
+; ALL: ld $25, %call16(__gttf2)($gp)
+; ALL: jalr $25
+
+; C_CC_FMT: slti $[[CC:[0-9]+]], $2, 1
+; C_CC_FMT: movz $[[R1]], $[[R3]], $[[CC]]
+; C_CC_FMT: movz $[[R0]], $[[R2]], $[[CC]]
+; C_CC_FMT: move $2, $[[R1]]
+; C_CC_FMT: move $4, $[[R0]]
+
+; CMP_CC_FMT: slt $[[CC:[0-9]+]], $zero, $2
+; CMP_CC_FMT: seleqz $[[EQ1:[0-9]+]], $[[R1]], $[[CC]]
+; CMP_CC_FMT: selnez $[[NE1:[0-9]+]], $[[R3]], $[[CC]]
+; CMP_CC_FMT: or $2, $[[NE1]], $[[EQ1]]
+; CMP_CC_FMT: seleqz $[[EQ2:[0-9]+]], $[[R0]], $[[CC]]
+; CMP_CC_FMT: selnez $[[NE2:[0-9]+]], $[[R2]], $[[CC]]
+; CMP_CC_FMT: or $4, $[[NE2]], $[[EQ2]]
define fp128 @selectCC_LD(fp128 %a, fp128 %b, fp128 %c, fp128 %d) {
entry:
diff --git a/test/CodeGen/Mips/mips64-fp-indexed-ls.ll b/test/CodeGen/Mips/mips64-fp-indexed-ls.ll
deleted file mode 100644
index bbdc05c..0000000
--- a/test/CodeGen/Mips/mips64-fp-indexed-ls.ll
+++ /dev/null
@@ -1,110 +0,0 @@
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
-
-%struct.S = type <{ [4 x float] }>
-%struct.S2 = type <{ [4 x double] }>
-%struct.S3 = type <{ i8, float }>
-
-@s = external global [4 x %struct.S]
-@gf = external global float
-@gd = external global double
-@s2 = external global [4 x %struct.S2]
-@s3 = external global %struct.S3
-
-define float @foo0(float* nocapture %b, i32 %o) nounwind readonly {
-entry:
-; CHECK: lwxc1
- %idxprom = zext i32 %o to i64
- %arrayidx = getelementptr inbounds float* %b, i64 %idxprom
- %0 = load float* %arrayidx, align 4
- ret float %0
-}
-
-define double @foo1(double* nocapture %b, i32 %o) nounwind readonly {
-entry:
-; CHECK: ldxc1
- %idxprom = zext i32 %o to i64
- %arrayidx = getelementptr inbounds double* %b, i64 %idxprom
- %0 = load double* %arrayidx, align 8
- ret double %0
-}
-
-define float @foo2(i32 %b, i32 %c) nounwind readonly {
-entry:
-; CHECK-NOT: luxc1
- %idxprom = zext i32 %c to i64
- %idxprom1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [4 x %struct.S]* @s, i64 0, i64 %idxprom1, i32 0, i64 %idxprom
- %0 = load float* %arrayidx2, align 1
- ret float %0
-}
-
-define void @foo3(float* nocapture %b, i32 %o) nounwind {
-entry:
-; CHECK: swxc1
- %0 = load float* @gf, align 4
- %idxprom = zext i32 %o to i64
- %arrayidx = getelementptr inbounds float* %b, i64 %idxprom
- store float %0, float* %arrayidx, align 4
- ret void
-}
-
-define void @foo4(double* nocapture %b, i32 %o) nounwind {
-entry:
-; CHECK: sdxc1
- %0 = load double* @gd, align 8
- %idxprom = zext i32 %o to i64
- %arrayidx = getelementptr inbounds double* %b, i64 %idxprom
- store double %0, double* %arrayidx, align 8
- ret void
-}
-
-define void @foo5(i32 %b, i32 %c) nounwind {
-entry:
-; CHECK-NOT: suxc1
- %0 = load float* @gf, align 4
- %idxprom = zext i32 %c to i64
- %idxprom1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [4 x %struct.S]* @s, i64 0, i64 %idxprom1, i32 0, i64 %idxprom
- store float %0, float* %arrayidx2, align 1
- ret void
-}
-
-define double @foo6(i32 %b, i32 %c) nounwind readonly {
-entry:
-; CHECK: foo6
-; CHECK-NOT: luxc1
- %idxprom = zext i32 %c to i64
- %idxprom1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [4 x %struct.S2]* @s2, i64 0, i64 %idxprom1, i32 0, i64 %idxprom
- %0 = load double* %arrayidx2, align 1
- ret double %0
-}
-
-define void @foo7(i32 %b, i32 %c) nounwind {
-entry:
-; CHECK: foo7
-; CHECK-NOT: suxc1
- %0 = load double* @gd, align 8
- %idxprom = zext i32 %c to i64
- %idxprom1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [4 x %struct.S2]* @s2, i64 0, i64 %idxprom1, i32 0, i64 %idxprom
- store double %0, double* %arrayidx2, align 1
- ret void
-}
-
-define float @foo8() nounwind readonly {
-entry:
-; CHECK: foo8
-; CHECK-NOT: luxc1
- %0 = load float* getelementptr inbounds (%struct.S3* @s3, i64 0, i32 1), align 1
- ret float %0
-}
-
-define void @foo9(float %f) nounwind {
-entry:
-; CHECK: foo9
-; CHECK-NOT: suxc1
- store float %f, float* getelementptr inbounds (%struct.S3* @s3, i64 0, i32 1), align 1
- ret void
-}
-
diff --git a/test/CodeGen/Mips/mips64countleading.ll b/test/CodeGen/Mips/mips64countleading.ll
deleted file mode 100644
index 252f323..0000000
--- a/test/CodeGen/Mips/mips64countleading.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck -check-prefix=CHECK -check-prefix=MIPS4 %s
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck -check-prefix=CHECK -check-prefix=MIPS64 %s
-
-define i64 @t1(i64 %X) nounwind readnone {
-entry:
-; CHECK-LABEL: t1:
-; MIPS4-NOT: dclz
-; MIPS64: dclz
- %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
- ret i64 %tmp1
-}
-
-declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
-
-define i64 @t3(i64 %X) nounwind readnone {
-entry:
-; CHECK-LABEL: t3:
-; MIPS4-NOT: dclo
-; MIPS64: dclo
- %neg = xor i64 %X, -1
- %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
- ret i64 %tmp1
-}
-
diff --git a/test/CodeGen/Mips/mips64instrs.ll b/test/CodeGen/Mips/mips64instrs.ll
index 58f11f1..ed617be 100644
--- a/test/CodeGen/Mips/mips64instrs.ll
+++ b/test/CodeGen/Mips/mips64instrs.ll
@@ -1,99 +1,128 @@
-; RUN: llc -march=mips64el -mcpu=mips4 -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK -check-prefix=MIPS4 %s
-; RUN: llc -march=mips64el -mcpu=mips64 -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK -check-prefix=MIPS64 %s
+; RUN: llc -march=mips64el -mcpu=mips4 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS4 -check-prefix=ACCMULDIV %s
+; RUN: llc -march=mips64el -mcpu=mips64 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=HAS-DCLO -check-prefix=ACCMULDIV %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=HAS-DCLO -check-prefix=ACCMULDIV %s
+; RUN: llc -march=mips64el -mcpu=mips64r6 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=HAS-DCLO -check-prefix=GPRMULDIV %s
@gll0 = common global i64 0, align 8
@gll1 = common global i64 0, align 8
define i64 @f0(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: daddu
+; ALL-LABEL: f0:
+; ALL: daddu $2, ${{[45]}}, ${{[45]}}
%add = add nsw i64 %a1, %a0
ret i64 %add
}
define i64 @f1(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: dsubu
+; ALL-LABEL: f1:
+; ALL: dsubu $2, $4, $5
%sub = sub nsw i64 %a0, %a1
ret i64 %sub
}
define i64 @f4(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: and
+; ALL-LABEL: f4:
+; ALL: and $2, ${{[45]}}, ${{[45]}}
%and = and i64 %a1, %a0
ret i64 %and
}
define i64 @f5(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: or
+; ALL-LABEL: f5:
+; ALL: or $2, ${{[45]}}, ${{[45]}}
%or = or i64 %a1, %a0
ret i64 %or
}
define i64 @f6(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: xor
+; ALL-LABEL: f6:
+; ALL: xor $2, ${{[45]}}, ${{[45]}}
%xor = xor i64 %a1, %a0
ret i64 %xor
}
define i64 @f7(i64 %a0) nounwind readnone {
entry:
-; CHECK: daddiu ${{[0-9]+}}, ${{[0-9]+}}, 20
+; ALL-LABEL: f7:
+; ALL: daddiu $2, $4, 20
%add = add nsw i64 %a0, 20
ret i64 %add
}
define i64 @f8(i64 %a0) nounwind readnone {
entry:
-; CHECK: daddiu ${{[0-9]+}}, ${{[0-9]+}}, -20
+; ALL-LABEL: f8:
+; ALL: daddiu $2, $4, -20
%sub = add nsw i64 %a0, -20
ret i64 %sub
}
define i64 @f9(i64 %a0) nounwind readnone {
entry:
-; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}, 20
+; ALL-LABEL: f9:
+; ALL: andi $2, $4, 20
%and = and i64 %a0, 20
ret i64 %and
}
define i64 @f10(i64 %a0) nounwind readnone {
entry:
-; CHECK: ori ${{[0-9]+}}, ${{[0-9]+}}, 20
+; ALL-LABEL: f10:
+; ALL: ori $2, $4, 20
%or = or i64 %a0, 20
ret i64 %or
}
define i64 @f11(i64 %a0) nounwind readnone {
entry:
-; CHECK: xori ${{[0-9]+}}, ${{[0-9]+}}, 20
+; ALL-LABEL: f11:
+; ALL: xori $2, $4, 20
%xor = xor i64 %a0, 20
ret i64 %xor
}
define i64 @f12(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK: mult
+; ALL-LABEL: f12:
+
+; ACCMULDIV: mult ${{[45]}}, ${{[45]}}
+; GPRMULDIV: dmul $2, ${{[45]}}, ${{[45]}}
+
%mul = mul nsw i64 %b, %a
ret i64 %mul
}
define i64 @f13(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK: mult
+; ALL-LABEL: f13:
+
+; ACCMULDIV: mult ${{[45]}}, ${{[45]}}
+; GPRMULDIV: dmul $2, ${{[45]}}, ${{[45]}}
+
%mul = mul i64 %b, %a
ret i64 %mul
}
define i64 @f14(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK-LABEL: f14:
-; CHECK: ddiv $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; CHECK: teq $[[R0]], $zero, 7
-; CHECK: mflo
+; ALL-LABEL: f14:
+; ALL-DAG: ld $[[P0:[0-9]+]], %got_disp(gll0)(
+; ALL-DAG: ld $[[P1:[0-9]+]], %got_disp(gll1)(
+; ALL-DAG: ld $[[T0:[0-9]+]], 0($[[P0]])
+; ALL-DAG: ld $[[T1:[0-9]+]], 0($[[P1]])
+
+; ACCMULDIV: ddiv $zero, $[[T0]], $[[T1]]
+; ACCMULDIV: teq $[[T1]], $zero, 7
+; ACCMULDIV: mflo $2
+
+; GPRMULDIV: ddiv $2, $[[T0]], $[[T1]]
+; GPRMULDIV: teq $[[T1]], $zero, 7
+
%0 = load i64* @gll0, align 8
%1 = load i64* @gll1, align 8
%div = sdiv i64 %0, %1
@@ -102,10 +131,19 @@ entry:
define i64 @f15() nounwind readnone {
entry:
-; CHECK-LABEL: f15:
-; CHECK: ddivu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; CHECK: teq $[[R0]], $zero, 7
-; CHECK: mflo
+; ALL-LABEL: f15:
+; ALL-DAG: ld $[[P0:[0-9]+]], %got_disp(gll0)(
+; ALL-DAG: ld $[[P1:[0-9]+]], %got_disp(gll1)(
+; ALL-DAG: ld $[[T0:[0-9]+]], 0($[[P0]])
+; ALL-DAG: ld $[[T1:[0-9]+]], 0($[[P1]])
+
+; ACCMULDIV: ddivu $zero, $[[T0]], $[[T1]]
+; ACCMULDIV: teq $[[T1]], $zero, 7
+; ACCMULDIV: mflo $2
+
+; GPRMULDIV: ddivu $2, $[[T0]], $[[T1]]
+; GPRMULDIV: teq $[[T1]], $zero, 7
+
%0 = load i64* @gll0, align 8
%1 = load i64* @gll1, align 8
%div = udiv i64 %0, %1
@@ -114,20 +152,30 @@ entry:
define i64 @f16(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK-LABEL: f16:
-; CHECK: ddiv $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; CHECK: teq $[[R0]], $zero, 7
-; CHECK: mfhi
+; ALL-LABEL: f16:
+
+; ACCMULDIV: ddiv $zero, $4, $5
+; ACCMULDIV: teq $5, $zero, 7
+; ACCMULDIV: mfhi $2
+
+; GPRMULDIV: dmod $2, $4, $5
+; GPRMULDIV: teq $5, $zero, 7
+
%rem = srem i64 %a, %b
ret i64 %rem
}
define i64 @f17(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK-LABEL: f17:
-; CHECK: ddivu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; CHECK: teq $[[R0]], $zero, 7
-; CHECK: mfhi
+; ALL-LABEL: f17:
+
+; ACCMULDIV: ddivu $zero, $4, $5
+; ACCMULDIV: teq $5, $zero, 7
+; ACCMULDIV: mfhi $2
+
+; GPRMULDIV: dmodu $2, $4, $5
+; GPRMULDIV: teq $5, $zero, 7
+
%rem = urem i64 %a, %b
ret i64 %rem
}
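Reader's note on f14 through f17: the `teq` in each sequence is the divide-by-zero guard, a conditional trap on the divisor. A sketch with hypothetical registers:

;   ddiv $zero, $a, $b    ; (ddivu / dmod / dmodu in the other cases)
;   teq  $b, $zero, 7     ; trap with break code 7 if the divisor is zero
;   mflo $q               ; pre-R6 only: read the quotient from LO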
@@ -136,24 +184,26 @@ declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
define i64 @f18(i64 %X) nounwind readnone {
entry:
-; CHECK-LABEL: f18:
+; ALL-LABEL: f18:
; The MIPS4 version is too long to reasonably test. At least check we don't get dclz
-; MIPS4-NOT: dclz
+; MIPS4-NOT: dclz
+
+; HAS-DCLO: dclz $2, $4
-; MIPS64: dclz $2, $4
%tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
ret i64 %tmp1
}
define i64 @f19(i64 %X) nounwind readnone {
entry:
-; CHECK-LABEL: f19:
+; ALL-LABEL: f19:
; The MIPS4 version is too long to reasonably test. At least check we don't get dclo
-; MIPS4-NOT: dclo
+; MIPS4-NOT: dclo
+
+; HAS-DCLO: dclo $2, $4
-; MIPS64: dclo $2, $4
%neg = xor i64 %X, -1
%tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
ret i64 %tmp1
@@ -161,8 +211,8 @@ entry:
define i64 @f20(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK-LABEL: f20:
-; CHECK: nor
+; ALL-LABEL: f20:
+; ALL: nor $2, ${{[45]}}, ${{[45]}}
%or = or i64 %b, %a
%neg = xor i64 %or, -1
ret i64 %neg
diff --git a/test/CodeGen/Mips/mips64muldiv.ll b/test/CodeGen/Mips/mips64muldiv.ll
index 39c73e9..32d05a9 100644
--- a/test/CodeGen/Mips/mips64muldiv.ll
+++ b/test/CodeGen/Mips/mips64muldiv.ll
@@ -1,50 +1,79 @@
-; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC
+; RUN: llc -march=mips64el -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC
+; RUN: llc -march=mips64el -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR
+
+; FileCheck prefixes:
+; ALL - All targets
+; ACC - Targets with accumulator-based mul/div (i.e. pre-MIPS64r6)
+; GPR - Targets with register-based mul/div (i.e. MIPS64r6)
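A reader's sketch of the prefix mechanics (directives hypothetical): FileCheck only honours directives whose prefix is named on its command line, so one test body serves all four RUN configurations.

;   RUN: ... | FileCheck %s -check-prefix=ALL -check-prefix=ACC
;   ALL: daddu ...    <- consulted by every RUN line above
;   ACC: mflo ...     <- consulted only when ACC is passed
;   GPR: dmul ...     <- ignored by this particular RUN line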
define i64 @m0(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: dmult
-; CHECK: mflo
+; ALL-LABEL: m0:
+; ACC: dmult ${{[45]}}, ${{[45]}}
+; ACC: mflo $2
+; GPR: dmul $2, ${{[45]}}, ${{[45]}}
%mul = mul i64 %a1, %a0
ret i64 %mul
}
define i64 @m1(i64 %a) nounwind readnone {
entry:
-; CHECK: dmult
-; CHECK: mfhi
+; ALL-LABEL: m1:
+; ALL: lui $[[T0:[0-9]+]], 21845
+; ALL: addiu $[[T0]], $[[T0]], 21845
+; ALL: dsll $[[T0]], $[[T0]], 16
+; ALL: addiu $[[T0]], $[[T0]], 21845
+; ALL: dsll $[[T0]], $[[T0]], 16
+; ALL: addiu $[[T0]], $[[T0]], 21846
+
+; ACC: dmult $4, $[[T0]]
+; ACC: mfhi $[[T1:[0-9]+]]
+; GPR: dmuh $[[T1:[0-9]+]], $4, $[[T0]]
+
+; ALL: dsrl $2, $[[T1]], 63
+; ALL: daddu $2, $[[T1]], $2
%div = sdiv i64 %a, 3
ret i64 %div
}
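Worked annotation (not part of the test): the five-instruction constant above is 0x5555555555555556 = ceil(2^64 / 3), the standard magic multiplier for signed division by 3; the quotient is the high half of the product plus a sign correction.

;   m  = 0x5555555555555556      ; built by the lui/addiu/dsll sequence
;   hi = (a * m) >> 64           ; dmult+mfhi, or dmuh on R6
;   q  = hi + (hi >>u 63)        ; add 1 when a (and hence hi) is negative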
define i64 @d0(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: ddivu
-; CHECK: mflo
+; ALL-LABEL: d0:
+; ACC: ddivu $zero, $4, $5
+; ACC: mflo $2
+; GPR: ddivu $2, $4, $5
%div = udiv i64 %a0, %a1
ret i64 %div
}
define i64 @d1(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: ddiv
-; CHECK: mflo
+; ALL-LABEL: d1:
+; ACC: ddiv $zero, $4, $5
+; ACC: mflo $2
+; GPR: ddiv $2, $4, $5
%div = sdiv i64 %a0, %a1
ret i64 %div
}
define i64 @d2(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: ddivu
-; CHECK: mfhi
+; ALL-LABEL: d2:
+; ACC: ddivu $zero, $4, $5
+; ACC: mfhi $2
+; GPR: dmodu $2, $4, $5
%rem = urem i64 %a0, %a1
ret i64 %rem
}
define i64 @d3(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: ddiv
-; CHECK: mfhi
+; ALL-LABEL: d3:
+; ACC: ddiv $zero, $4, $5
+; ACC: mfhi $2
+; GPR: dmod $2, $4, $5
%rem = srem i64 %a0, %a1
ret i64 %rem
}
diff --git a/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
index f4854f8..244b03d 100644
--- a/test/CodeGen/Mips/mno-ldc1-sdc1.ll
+++ b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
@@ -1,33 +1,113 @@
-; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 -mcpu=mips32r2 \
-; RUN: < %s | FileCheck %s -check-prefix=LE-PIC
-; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 < %s | \
-; RUN: FileCheck %s -check-prefix=LE-STATIC
-; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 < %s | \
-; RUN: FileCheck %s -check-prefix=BE-PIC
+; Check that [sl]dc1 are normally emitted. MIPS32r2 should have [sl]dxc1 too.
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R1-LDC1
; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | \
-; RUN: FileCheck %s -check-prefix=CHECK-LDC1-SDC1
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R2-LDXC1
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R6-LDC1
+
+; Check that -mno-ldc1-sdc1 disables [sl]dc1
+; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R1 \
+; RUN: -check-prefix=32R1-LE -check-prefix=32R1-LE-PIC
+; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r2 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R2 \
+; RUN: -check-prefix=32R2-LE -check-prefix=32R2-LE-PIC
+; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r6 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R6 \
+; RUN: -check-prefix=32R6-LE -check-prefix=32R6-LE-PIC
+
+; Check again for big-endian
+; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R1 \
+; RUN: -check-prefix=32R1-BE -check-prefix=32R1-BE-PIC
+; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r2 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R2 \
+; RUN: -check-prefix=32R2-BE -check-prefix=32R2-BE-PIC
+; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r6 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R6 \
+; RUN: -check-prefix=32R6-BE -check-prefix=32R6-BE-PIC
+
+; Check again for the static relocation model
+; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R1 \
+; RUN: -check-prefix=32R1-LE -check-prefix=32R1-LE-STATIC
+; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r2 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R2 \
+; RUN: -check-prefix=32R2-LE -check-prefix=32R2-LE-STATIC
+; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r6 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R6 \
+; RUN: -check-prefix=32R6-LE -check-prefix=32R6-LE-STATIC
@g0 = common global double 0.000000e+00, align 8
-; LE-PIC-LABEL: test_ldc1:
-; LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
-; LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
-; LE-PIC-DAG: mtc1 $[[R0]], $f0
-; LE-PIC-DAG: mtc1 $[[R1]], $f1
-; LE-STATIC-LABEL: test_ldc1:
-; LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
-; LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
-; LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
-; LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
-; LE-STATIC-DAG: mtc1 $[[R1]], $f0
-; LE-STATIC-DAG: mtc1 $[[R3]], $f1
-; BE-PIC-LABEL: test_ldc1:
-; BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
-; BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
-; BE-PIC-DAG: mtc1 $[[R1]], $f0
-; BE-PIC-DAG: mtc1 $[[R0]], $f1
-; CHECK-LDC1-SDC1-LABEL: test_ldc1:
-; CHECK-LDC1-SDC1: ldc1 $f{{[0-9]+}}
+; ALL-LABEL: test_ldc1:
+
+; 32R1-LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R1-LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R1-LE-PIC-DAG: mtc1 $[[R0]], $f0
+; 32R1-LE-PIC-DAG: mtc1 $[[R1]], $f1
+
+; 32R2-LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R2-LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R2-LE-PIC-DAG: mtc1 $[[R0]], $f0
+; 32R2-LE-PIC-DAG: mthc1 $[[R1]], $f0
+
+; 32R6-LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-LE-PIC-DAG: mtc1 $[[R0]], $f0
+; 32R6-LE-PIC-DAG: mthc1 $[[R1]], $f0
+
+; 32R1-LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
+; 32R1-LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
+; 32R1-LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
+; 32R1-LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
+; 32R1-LE-STATIC-DAG: mtc1 $[[R1]], $f0
+; 32R1-LE-STATIC-DAG: mtc1 $[[R3]], $f1
+
+; 32R2-LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
+; 32R2-LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
+; 32R2-LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
+; 32R2-LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
+; 32R2-LE-STATIC-DAG: mtc1 $[[R1]], $f0
+; 32R2-LE-STATIC-DAG: mthc1 $[[R3]], $f0
+
+; 32R6-LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
+; 32R6-LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
+; 32R6-LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
+; 32R6-LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
+; 32R6-LE-STATIC-DAG: mtc1 $[[R1]], $f0
+; 32R6-LE-STATIC-DAG: mthc1 $[[R3]], $f0
+
+; 32R1-BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R1-BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R1-BE-PIC-DAG: mtc1 $[[R1]], $f0
+; 32R1-BE-PIC-DAG: mtc1 $[[R0]], $f1
+
+; 32R2-BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R2-BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R2-BE-PIC-DAG: mtc1 $[[R1]], $f0
+; 32R2-BE-PIC-DAG: mthc1 $[[R0]], $f0
+
+; 32R6-BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-BE-PIC-DAG: mtc1 $[[R1]], $f0
+; 32R6-BE-PIC-DAG: mthc1 $[[R0]], $f0
+
+; 32R1-LDC1: ldc1 $f0, 0(${{[0-9]+}})
+
+; 32R2-LDXC1: ldc1 $f0, 0(${{[0-9]+}})
+
+; 32R6-LDC1: ldc1 $f0, 0(${{[0-9]+}})
define double @test_ldc1() {
entry:
@@ -35,25 +115,64 @@ entry:
ret double %0
}
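A reader's summary of the pattern (registers hypothetical): with ldc1 disabled, the 64-bit load becomes two word loads; pre-MIPS32r2 targets write the high half to the odd register $f1, while MIPS32r2 and later use mthc1 into the upper half of $f0.

;   lw    $t0, 0($p)      ; low word (little-endian layout)
;   lw    $t1, 4($p)      ; high word
;   mtc1  $t0, $f0
;   mthc1 $t1, $f0        ; 32R1 instead uses: mtc1 $t1, $f1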
-; LE-PIC-LABEL: test_sdc1:
-; LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
-; LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
-; LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
-; LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
-; LE-STATIC-LABEL: test_sdc1:
-; LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
-; LE-STATIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
-; LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
-; LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
-; LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
-; LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
-; BE-PIC-LABEL: test_sdc1:
-; BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
-; BE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
-; BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
-; BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
-; CHECK-LDC1-SDC1-LABEL: test_sdc1:
-; CHECK-LDC1-SDC1: sdc1 $f{{[0-9]+}}
+; ALL-LABEL: test_sdc1:
+
+; 32R1-LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R1-LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R1-LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R1-LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R2-LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R2-LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R2-LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R2-LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R6-LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R6-LE-PIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R6-LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R6-LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R1-LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R1-LE-STATIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R1-LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
+; 32R1-LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
+; 32R1-LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
+; 32R1-LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
+
+; 32R2-LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R2-LE-STATIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R2-LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
+; 32R2-LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
+; 32R2-LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
+; 32R2-LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
+
+; 32R6-LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R6-LE-STATIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R6-LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
+; 32R6-LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
+; 32R6-LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
+; 32R6-LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
+
+; 32R1-BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R1-BE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R1-BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
+; 32R1-BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
+
+; 32R2-BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R2-BE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R2-BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
+; 32R2-BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
+
+; 32R6-BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R6-BE-PIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R6-BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
+; 32R6-BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
+
+; 32R1-LDC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+
+; 32R2-LDXC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+
+; 32R6-LDC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
define void @test_sdc1(double %a) {
entry:
@@ -61,14 +180,35 @@ entry:
ret void
}
+; ALL-LABEL: test_ldxc1:
+
+; 32R1-LE-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R1-LE-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R1-BE-DAG: lw $[[R0:[0-9]+]], 4(${{[0-9]+}})
+; 32R1-BE-DAG: lw $[[R1:[0-9]+]], 0(${{[0-9]+}})
+; 32R1-DAG: mtc1 $[[R0]], $f0
+; 32R1-DAG: mtc1 $[[R1]], $f1
+
+; 32R2-LE-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R2-LE-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R2-BE-DAG: lw $[[R0:[0-9]+]], 4(${{[0-9]+}})
+; 32R2-BE-DAG: lw $[[R1:[0-9]+]], 0(${{[0-9]+}})
+; 32R2-DAG: mtc1 $[[R0]], $f0
+; 32R2-DAG: mthc1 $[[R1]], $f0
-; LE-PIC-LABEL: test_ldxc1:
-; LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
-; LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
-; LE-PIC-DAG: mtc1 $[[R0]], $f0
-; LE-PIC-DAG: mtc1 $[[R1]], $f1
-; CHECK-LDC1-SDC1-LABEL: test_ldxc1:
-; CHECK-LDC1-SDC1: ldxc1 $f{{[0-9]+}}
+; 32R6-LE-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-LE-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-BE-DAG: lw $[[R0:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-BE-DAG: lw $[[R1:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-DAG: mtc1 $[[R0]], $f0
+; 32R6-DAG: mthc1 $[[R1]], $f0
+
+; 32R1-LDC1: ldc1 $f0, 0(${{[0-9]+}})
+
+; 32R2-LDXC1: sll $[[OFFSET:[0-9]+]], $5, 3
+; 32R2-LDXC1: ldxc1 $f0, $[[OFFSET]]($4)
+
+; 32R6-LDC1: ldc1 $f0, 0(${{[0-9]+}})
define double @test_ldxc1(double* nocapture readonly %a, i32 %i) {
entry:
@@ -77,13 +217,29 @@ entry:
ret double %0
}
-; LE-PIC-LABEL: test_sdxc1:
-; LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
-; LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
-; LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
-; LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
-; CHECK-LDC1-SDC1-LABEL: test_sdxc1:
-; CHECK-LDC1-SDC1: sdxc1 $f{{[0-9]+}}
+; ALL-LABEL: test_sdxc1:
+
+; 32R1-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R1-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R1-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R1-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R2-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R2-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R2-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R2-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R6-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R6-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R6-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R6-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R1-LDC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+
+; 32R2-LDXC1: sll $[[OFFSET:[0-9]+]], $7, 3
+; 32R2-LDXC1: sdxc1 $f{{[0-9]+}}, $[[OFFSET]]($6)
+
+; 32R6-LDC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
define void @test_sdxc1(double %b, double* nocapture %a, i32 %i) {
entry:
diff --git a/test/CodeGen/Mips/msa/special.ll b/test/CodeGen/Mips/msa/special.ll
index f65a14f..b9badf5 100644
--- a/test/CodeGen/Mips/msa/special.ll
+++ b/test/CodeGen/Mips/msa/special.ll
@@ -4,6 +4,10 @@
; RUN: FileCheck %s --check-prefix=MIPS32
; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
; RUN: FileCheck %s --check-prefix=MIPS64
+; RUN: llc -march=mips -mcpu=mips32r6 -mattr=+msa < %s | \
+; RUN: FileCheck %s --check-prefix=MIPS32
+; RUN: llc -march=mips64 -mcpu=mips64r6 -mattr=+msa < %s | \
+; RUN: FileCheck %s --check-prefix=MIPS64
define i32 @llvm_mips_lsa_test(i32 %a, i32 %b) nounwind {
entry:
diff --git a/test/CodeGen/Mips/no-odd-spreg.ll b/test/CodeGen/Mips/no-odd-spreg.ll
new file mode 100644
index 0000000..b42ed6a
--- /dev/null
+++ b/test/CodeGen/Mips/no-odd-spreg.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ODDSPREG
+; RUN: llc -march=mipsel -mcpu=mips32 -mattr=+nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOODDSPREG
+; RUN: llc -march=mipsel -mcpu=mips32r6 -mattr=fp64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ODDSPREG
+; RUN: llc -march=mipsel -mcpu=mips32r6 -mattr=fp64,+nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOODDSPREG
+
+; ODDSPREG: .module oddspreg
+; NOODDSPREG: .module nooddspreg
+
+define float @two_floats(float %a) {
+entry:
+ ; Clobber all except $f12 and $f13
+ ;
+ ; The intention is that if odd single precision registers are permitted, the
+ ; allocator will choose $f12 and $f13 to avoid the spill/reload.
+ ;
+ ; On the other hand, if odd single precision registers are not permitted, it
+ ; will be forced to spill/reload either %a or %0.
+
+ %0 = fadd float %a, 1.0
+ call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ %1 = fadd float %a, %0
+ ret float %1
+}
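The clobber trick above generalizes; a minimal sketch in the same style (hypothetical function, clobber list trimmed for brevity) of forcing register pressure across a live value:

define float @force_pressure(float %x) {
entry:
  %0 = fadd float %x, 1.0
  ; Claim to clobber most FP registers so %x and %0 must survive elsewhere.
  call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5}"()
  %1 = fadd float %x, %0
  ret float %1
}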
+
+; ALL-LABEL: two_floats:
+; ODDSPREG: add.s $f13, $f12, ${{f[0-9]+}}
+; ODDSPREG-NOT: swc1
+; ODDSPREG-NOT: lwc1
+; ODDSPREG: add.s $f0, $f12, $f13
+
+; NOODDSPREG: add.s $[[T0:f[0-9]*[02468]]], $f12, ${{f[0-9]+}}
+; NOODDSPREG: swc1 $[[T0]],
+; NOODDSPREG: lwc1 $[[T1:f[0-9]*[02468]]],
+; NOODDSPREG: add.s $f0, $f12, $[[T1]]
+
+define double @two_doubles(double %a) {
+entry:
+ ; Clobber all except $f12 and $f13
+ ;
+ ; -mno-odd-sp-reg doesn't need to affect double precision values, so both cases
+ ; use $f12 and $f13.
+
+ %0 = fadd double %a, 1.0
+ call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ %1 = fadd double %a, %0
+ ret double %1
+}
+
+; ALL-LABEL: two_doubles:
+; ALL: add.d $[[T0:f[0-9]+]], $f12, ${{f[0-9]+}}
+; ALL: add.d $f0, $f12, $[[T0]]
+
+
+; INVALID: -mattr=+nooddspreg is not currently permitted for a 32-bit FPU register file (FR=0 mode).
diff --git a/test/CodeGen/Mips/null-streamer.ll b/test/CodeGen/Mips/null-streamer.ll
new file mode 100644
index 0000000..56cebbf
--- /dev/null
+++ b/test/CodeGen/Mips/null-streamer.ll
@@ -0,0 +1,7 @@
+; Test the null streamer with a target streamer.
+; RUN: llc -O0 -filetype=null -mtriple=mips-linux < %s
+
+define i32 @main() {
+entry:
+ ret i32 0
+}
diff --git a/test/CodeGen/Mips/prevent-hoisting.ll b/test/CodeGen/Mips/prevent-hoisting.ll
new file mode 100644
index 0000000..da665c2
--- /dev/null
+++ b/test/CodeGen/Mips/prevent-hoisting.ll
@@ -0,0 +1,144 @@
+; RUN: llc -march=mipsel -O3 < %s | FileCheck %s
+
+
+; MIPS direct branches implicitly define register $at. This test makes sure that
+; the code hoisting optimization (which moves identical instructions from the
+; start of two basic blocks into their common predecessor block) takes this into
+; account and doesn't move the definition of $at into the predecessor block
+; (which would make $at live-in at the start of the successor block).
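+;
+; For illustration only (hypothetical assembly, not checked by this test), the
+; hazard being guarded against looks roughly like:
+;
+;   $PRED:
+;     sll $1, $2, 4   ; hoisted definition of $at ($1)
+;     b   $BB0        ; direct branch; its expansion may itself clobber $at
+;   $BB0:
+;     ...             ; $1 would have to be live-in here, but may be corrupt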
+
+
+; CHECK-LABEL: readLumaCoeff8x8_CABAC
+
+; The check for the "addiu" instruction is added so that we can match the correct "b" instruction.
+; CHECK: addiu ${{[0-9]+}}, $zero, -1
+; CHECK: b $[[BB0:BB[0-9_]+]]
+
+; Check that the sll instruction that writes to $1 starts a basic block.
+; CHECK: {{BB[0-9_#]+}}:
+; CHECK-NEXT: sll $1, $[[R0:[0-9]+]], 4
+
+; Check that an identical sll instruction starts another basic block.
+; CHECK: [[BB0]]:
+; CHECK-NEXT: sll $1, $[[R0]], 4
+
+
+%struct.img_par = type { i32, i32, i32, i32, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [16 x i16]], [6 x [32 x i32]], [16 x [16 x i32]], [4 x [12 x [4 x [4 x i32]]]], [16 x i32], i8**, i32*, i32***, i32**, i32, i32, i32, i32, %struct.Slice*, %struct.macroblock*, i32, i32, i32, i32, i32, i32, %struct.DecRefPicMarking_s*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32***, i32***, i32****, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [3 x [2 x i32]], i32, i32, i32, i32, %struct.timeb, %struct.timeb, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.Slice = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.datapartition*, %struct.MotionInfoContexts*, %struct.TextureInfoContexts*, i32, i32*, i32*, i32*, i32, i32*, i32*, i32*, i32 (%struct.img_par*, %struct.inp_par*)*, i32, i32, i32, i32 }
+%struct.datapartition = type { %struct.Bitstream*, %struct.DecodingEnvironment, i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)* }
+%struct.Bitstream = type { i32, i32, i32, i32, i8*, i32 }
+%struct.DecodingEnvironment = type { i32, i32, i32, i32, i32, i8*, i32* }
+%struct.syntaxelement = type { i32, i32, i32, i32, i32, i32, i32, i32, void (i32, i32, i32*, i32*)*, void (%struct.syntaxelement*, %struct.img_par*, %struct.DecodingEnvironment*)* }
+%struct.MotionInfoContexts = type { [4 x [11 x %struct.BiContextType]], [2 x [9 x %struct.BiContextType]], [2 x [10 x %struct.BiContextType]], [2 x [6 x %struct.BiContextType]], [4 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x %struct.BiContextType] }
+%struct.BiContextType = type { i16, i8 }
+%struct.TextureInfoContexts = type { [2 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x [4 x %struct.BiContextType]], [10 x [4 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]] }
+%struct.inp_par = type { [1000 x i8], [1000 x i8], [1000 x i8], i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.macroblock = type { i32, [2 x i32], i32, i32, %struct.macroblock*, %struct.macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], i32, i64, i64, i32, i32, [4 x i8], [4 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.DecRefPicMarking_s = type { i32, i32, i32, i32, i32, %struct.DecRefPicMarking_s* }
+%struct.timeb = type { i32, i16, i16, i16 }
+
+@assignSE2partition = external global [0 x [20 x i32]]
+@FIELD_SCAN8x8 = external constant [64 x [2 x i8]]
+
+
+define void @readLumaCoeff8x8_CABAC(%struct.img_par* %img, i32 %b8) {
+
+ %1 = load i32* undef, align 4
+ br i1 false, label %2, label %3
+
+; <label>:2 ; preds = %0
+ br label %3
+
+; <label>:3 ; preds = %2, %0
+ br i1 undef, label %switch.lookup, label %4
+
+switch.lookup: ; preds = %3
+ br label %4
+
+; <label>:4 ; preds = %switch.lookup, %3
+ br i1 undef, label %5, label %6
+
+; <label>:5 ; preds = %4
+ br label %6
+
+; <label>:6 ; preds = %5, %4
+ %7 = phi [2 x i8]* [ getelementptr inbounds ([64 x [2 x i8]]* @FIELD_SCAN8x8, i32 0, i32 0), %4 ], [ null, %5 ]
+ br i1 undef, label %switch.lookup6, label %8
+
+switch.lookup6: ; preds = %6
+ br label %8
+
+; <label>:8 ; preds = %switch.lookup6, %6
+ br i1 undef, label %.loopexit, label %9
+
+; <label>:9 ; preds = %8
+ %10 = and i32 %b8, 1
+ %11 = shl nuw nsw i32 %10, 3
+ %12 = getelementptr inbounds %struct.Slice* null, i32 0, i32 9
+ br i1 undef, label %.preheader, label %.preheader11
+
+.preheader11: ; preds = %21, %9
+ %k.014 = phi i32 [ %27, %21 ], [ 0, %9 ]
+ %coef_ctr.013 = phi i32 [ %23, %21 ], [ -1, %9 ]
+ br i1 false, label %13, label %14
+
+; <label>:13 ; preds = %.preheader11
+ br label %15
+
+; <label>:14 ; preds = %.preheader11
+ br label %15
+
+; <label>:15 ; preds = %14, %13
+ %16 = getelementptr inbounds [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
+ %17 = load i32* %16, align 4
+ %18 = getelementptr inbounds %struct.datapartition* null, i32 %17, i32 2
+ %19 = load i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)** %18, align 4
+ %20 = call i32 %19(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* undef)
+ br i1 false, label %.loopexit, label %21
+
+; <label>:21 ; preds = %15
+ %22 = add i32 %coef_ctr.013, 1
+ %23 = add i32 %22, 0
+ %24 = getelementptr inbounds [2 x i8]* %7, i32 %23, i32 0
+ %25 = add nsw i32 0, %11
+ %26 = getelementptr inbounds %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %25
+ store i32 0, i32* %26, align 4
+ %27 = add nsw i32 %k.014, 1
+ %28 = icmp slt i32 %27, 65
+ br i1 %28, label %.preheader11, label %.loopexit
+
+.preheader: ; preds = %36, %9
+ %k.110 = phi i32 [ %45, %36 ], [ 0, %9 ]
+ %coef_ctr.29 = phi i32 [ %39, %36 ], [ -1, %9 ]
+ br i1 false, label %29, label %30
+
+; <label>:29 ; preds = %.preheader
+ br label %31
+
+; <label>:30 ; preds = %.preheader
+ br label %31
+
+; <label>:31 ; preds = %30, %29
+ %32 = getelementptr inbounds [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
+ %33 = load i32* %32, align 4
+ %34 = getelementptr inbounds %struct.datapartition* null, i32 %33
+ %35 = call i32 undef(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* %34)
+ br i1 false, label %.loopexit, label %36
+
+; <label>:36 ; preds = %31
+ %37 = load i32* undef, align 4
+ %38 = add i32 %coef_ctr.29, 1
+ %39 = add i32 %38, %37
+ %40 = getelementptr inbounds [2 x i8]* %7, i32 %39, i32 0
+ %41 = load i8* %40, align 1
+ %42 = zext i8 %41 to i32
+ %43 = add nsw i32 %42, %11
+ %44 = getelementptr inbounds %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %43
+ store i32 0, i32* %44, align 4
+ %45 = add nsw i32 %k.110, 1
+ %46 = icmp slt i32 %45, 65
+ br i1 %46, label %.preheader, label %.loopexit
+
+.loopexit: ; preds = %36, %31, %21, %15, %8
+ ret void
+}
diff --git a/test/CodeGen/Mips/select.ll b/test/CodeGen/Mips/select.ll
index 06e2a86..eb2198b 100644
--- a/test/CodeGen/Mips/select.ll
+++ b/test/CodeGen/Mips/select.ll
@@ -1,135 +1,705 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32R2
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32R6
+; RUN: llc < %s -march=mips64el -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=64
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=64R2
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=64R6
@d2 = external global double
@d3 = external global double
-define i32 @sel1(i32 %s, i32 %f0, i32 %f1) nounwind readnone {
+define i32 @i32_icmp_ne_i32_val(i32 %s, i32 %f0, i32 %f1) nounwind readnone {
entry:
-; CHECK: movn
+; ALL-LABEL: i32_icmp_ne_i32_val:
+
+; 32: movn $5, $6, $4
+; 32: move $2, $5
+
+; 32R2: movn $5, $6, $4
+; 32R2: move $2, $5
+
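+; (mips32r6 has no movn/movz; the select is instead emitted as a
+; seleqz/selnez pair combined with an or.)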
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $5, $4
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $6, $4
+; 32R6: or $2, $[[T1]], $[[T0]]
+
+; 64: movn $5, $6, $4
+; 64: move $2, $5
+
+; 64R2: movn $5, $6, $4
+; 64R2: move $2, $5
+
+; 64R6-DAG: seleqz $[[T0:[0-9]+]], $5, $4
+; 64R6-DAG: selnez $[[T1:[0-9]+]], $6, $4
+; 64R6: or $2, $[[T1]], $[[T0]]
+
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, i32 %f1, i32 %f0
ret i32 %cond
}
-define float @sel2(i32 %s, float %f0, float %f1) nounwind readnone {
+define i64 @i32_icmp_ne_i64_val(i32 %s, i64 %f0, i64 %f1) nounwind readnone {
+entry:
+; ALL-LABEL: i32_icmp_ne_i64_val:
+
+; 32-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32-DAG: movn $6, $[[F1]], $4
+; 32-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32: movn $7, $[[F1H]], $4
+; 32: move $2, $6
+; 32: move $3, $7
+
+; 32R2-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32R2-DAG: movn $6, $[[F1]], $4
+; 32R2-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32R2: movn $7, $[[F1H]], $4
+; 32R2: move $2, $6
+; 32R2: move $3, $7
+
+; 32R6-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $6, $4
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $[[F1]], $4
+; 32R6: or $2, $[[T1]], $[[T0]]
+; 32R6-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $7, $4
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $[[F1H]], $4
+; 32R6: or $3, $[[T1]], $[[T0]]
+
+; 64: movn $5, $6, $4
+; 64: move $2, $5
+
+; 64R2: movn $5, $6, $4
+; 64R2: move $2, $5
+
+; FIXME: This sll works around an implementation detail in the code generator
+; (setcc's result is i32, so bits 32-63 are undefined). It's not really
+; needed.
+; 64R6-DAG: sll $[[CC:[0-9]+]], $4, 0
+; 64R6-DAG: seleqz $[[T0:[0-9]+]], $5, $[[CC]]
+; 64R6-DAG: selnez $[[T1:[0-9]+]], $6, $[[CC]]
+; 64R6: or $2, $[[T1]], $[[T0]]
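+;
+; As a purely illustrative example: if $4 held 0xdeadbeef00000001 (bits 32-63
+; being undefined garbage), the "sll ..., $4, 0" sign-extends bit 31 and
+; yields 0x0000000000000001, so seleqz/selnez see a well-defined value.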
+
+ %tobool = icmp ne i32 %s, 0
+ %cond = select i1 %tobool, i64 %f1, i64 %f0
+ ret i64 %cond
+}
+
+define i64 @i64_icmp_ne_i64_val(i64 %s, i64 %f0, i64 %f1) nounwind readnone {
entry:
-; CHECK: movn.s
+; ALL-LABEL: i64_icmp_ne_i64_val:
+
+; 32-DAG: or $[[CC:[0-9]+]], $4
+; 32-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32-DAG: movn $6, $[[F1]], $[[CC]]
+; 32-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32: movn $7, $[[F1H]], $[[CC]]
+; 32: move $2, $6
+; 32: move $3, $7
+
+; 32R2-DAG: or $[[CC:[0-9]+]], $4
+; 32R2-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32R2-DAG: movn $6, $[[F1]], $[[CC]]
+; 32R2-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32R2: movn $7, $[[F1H]], $[[CC]]
+; 32R2: move $2, $6
+; 32R2: move $3, $7
+
+; 32R6-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32R6-DAG: or $[[T2:[0-9]+]], $4, $5
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $6, $[[T2]]
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $[[F1]], $[[T2]]
+; 32R6: or $2, $[[T1]], $[[T0]]
+; 32R6-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $7, $[[T2]]
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $[[F1H]], $[[T2]]
+; 32R6: or $3, $[[T1]], $[[T0]]
+
+; 64: movn $5, $6, $4
+; 64: move $2, $5
+
+; 64R2: movn $5, $6, $4
+; 64R2: move $2, $5
+
+; 64R6-DAG: seleqz $[[T0:[0-9]+]], $5, $4
+; 64R6-DAG: selnez $[[T1:[0-9]+]], $6, $4
+; 64R6: or $2, $[[T1]], $[[T0]]
+
+ %tobool = icmp ne i64 %s, 0
+ %cond = select i1 %tobool, i64 %f1, i64 %f0
+ ret i64 %cond
+}
+
+define float @i32_icmp_ne_f32_val(i32 %s, float %f0, float %f1) nounwind readnone {
+entry:
+; ALL-LABEL: i32_icmp_ne_f32_val:
+
+; 32-DAG: mtc1 $5, $[[F0:f[0-9]+]]
+; 32-DAG: mtc1 $6, $[[F1:f0]]
+; 32: movn.s $[[F1]], $[[F0]], $4
+
+; 32R2-DAG: mtc1 $5, $[[F0:f[0-9]+]]
+; 32R2-DAG: mtc1 $6, $[[F1:f0]]
+; 32R2: movn.s $[[F1]], $[[F0]], $4
+
+; 32R6-DAG: mtc1 $5, $[[F0:f[0-9]+]]
+; 32R6-DAG: mtc1 $6, $[[F1:f[0-9]+]]
+; 32R6: sltu $[[T0:[0-9]+]], $zero, $4
+; 32R6: mtc1 $[[T0]], $[[CC:f0]]
+; 32R6: sel.s $[[CC]], $[[F1]], $[[F0]]
+
+; 64: movn.s $f14, $f13, $4
+; 64: mov.s $f0, $f14
+
+; 64R2: movn.s $f14, $f13, $4
+; 64R2: mov.s $f0, $f14
+
+; 64R6: sltu $[[T0:[0-9]+]], $zero, $4
+; 64R6: mtc1 $[[T0]], $[[CC:f0]]
+; 64R6: sel.s $[[CC]], $f14, $f13
+
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, float %f0, float %f1
ret float %cond
}
-define double @sel2_1(i32 %s, double %f0, double %f1) nounwind readnone {
+define double @i32_icmp_ne_f64_val(i32 %s, double %f0, double %f1) nounwind readnone {
entry:
-; CHECK: movn.d
+; ALL-LABEL: i32_icmp_ne_f64_val:
+
+; 32-DAG: mtc1 $6, $[[F0:f[1-3]*[02468]+]]
+; 32-DAG: mtc1 $7, $[[F0H:f[1-3]*[13579]+]]
+; 32-DAG: ldc1 $[[F1:f0]], 16($sp)
+; 32: movn.d $[[F1]], $[[F0]], $4
+
+; 32R2-DAG: mtc1 $6, $[[F0:f[0-9]+]]
+; 32R2-DAG: mthc1 $7, $[[F0]]
+; 32R2-DAG: ldc1 $[[F1:f0]], 16($sp)
+; 32R2: movn.d $[[F1]], $[[F0]], $4
+
+; 32R6-DAG: mtc1 $6, $[[F0:f[0-9]+]]
+; 32R6-DAG: mthc1 $7, $[[F0]]
+; 32R6-DAG: sltu $[[T0:[0-9]+]], $zero, $4
+; 32R6-DAG: mtc1 $[[T0]], $[[CC:f0]]
+; 32R6-DAG: ldc1 $[[F1:f[0-9]+]], 16($sp)
+; 32R6: sel.d $[[CC]], $[[F1]], $[[F0]]
+
+; 64: movn.d $f14, $f13, $4
+; 64: mov.d $f0, $f14
+
+; 64R2: movn.d $f14, $f13, $4
+; 64R2: mov.d $f0, $f14
+
+; 64R6-DAG: sltu $[[T0:[0-9]+]], $zero, $4
+; 64R6-DAG: mtc1 $[[T0]], $[[CC:f0]]
+; 64R6: sel.d $[[CC]], $f14, $f13
+
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, double %f0, double %f1
ret double %cond
}
-define float @sel3(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
+define float @f32_fcmp_oeq_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.eq.s
-; CHECK: movt.s
+; ALL-LABEL: f32_fcmp_oeq_f32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.eq.s $[[F2]], $[[F3]]
+; 32: movt.s $f14, $f12, $fcc0
+; 32: mov.s $f0, $f14
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.eq.s $[[F2]], $[[F3]]
+; 32R2: movt.s $f14, $f12, $fcc0
+; 32R2: mov.s $f0, $f14
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.eq.s $[[CC:f0]], $[[F2]], $[[F3]]
+; 32R6: sel.s $[[CC]], $f14, $f12
+
+; 64: c.eq.s $f14, $f15
+; 64: movt.s $f13, $f12, $fcc0
+; 64: mov.s $f0, $f13
+
+; 64R2: c.eq.s $f14, $f15
+; 64R2: movt.s $f13, $f12, $fcc0
+; 64R2: mov.s $f0, $f13
+
+; 64R6: cmp.eq.s $[[CC:f0]], $f14, $f15
+; 64R6: sel.s $[[CC]], $f13, $f12
+
%cmp = fcmp oeq float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
}
-define float @sel4(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
+define float @f32_fcmp_olt_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.olt.s
-; CHECK: movt.s
+; ALL-LABEL: f32_fcmp_olt_f32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.olt.s $[[F2]], $[[F3]]
+; 32: movt.s $f14, $f12, $fcc0
+; 32: mov.s $f0, $f14
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.olt.s $[[F2]], $[[F3]]
+; 32R2: movt.s $f14, $f12, $fcc0
+; 32R2: mov.s $f0, $f14
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.lt.s $[[CC:f0]], $[[F2]], $[[F3]]
+; 32R6: sel.s $[[CC]], $f14, $f12
+
+; 64: c.olt.s $f14, $f15
+; 64: movt.s $f13, $f12, $fcc0
+; 64: mov.s $f0, $f13
+
+; 64R2: c.olt.s $f14, $f15
+; 64R2: movt.s $f13, $f12, $fcc0
+; 64R2: mov.s $f0, $f13
+
+; 64R6: cmp.lt.s $[[CC:f0]], $f14, $f15
+; 64R6: sel.s $[[CC]], $f13, $f12
+
%cmp = fcmp olt float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
}
-define float @sel5(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
+define float @f32_fcmp_ogt_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.ule.s
-; CHECK: movf.s
+; ALL-LABEL: f32_fcmp_ogt_f32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.ule.s $[[F2]], $[[F3]]
+; 32: movf.s $f14, $f12, $fcc0
+; 32: mov.s $f0, $f14
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.ule.s $[[F2]], $[[F3]]
+; 32R2: movf.s $f14, $f12, $fcc0
+; 32R2: mov.s $f0, $f14
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.lt.s $[[CC:f0]], $[[F3]], $[[F2]]
+; 32R6: sel.s $[[CC]], $f14, $f12
+
+; 64: c.ule.s $f14, $f15
+; 64: movf.s $f13, $f12, $fcc0
+; 64: mov.s $f0, $f13
+
+; 64R2: c.ule.s $f14, $f15
+; 64R2: movf.s $f13, $f12, $fcc0
+; 64R2: mov.s $f0, $f13
+
+; 64R6: cmp.lt.s $[[CC:f0]], $f15, $f14
+; 64R6: sel.s $[[CC]], $f13, $f12
+
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
}
-define double @sel5_1(double %f0, double %f1, float %f2, float %f3) nounwind readnone {
+define double @f32_fcmp_ogt_f64_val(double %f0, double %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.ule.s
-; CHECK: movf.d
+; ALL-LABEL: f32_fcmp_ogt_f64_val:
+
+; 32-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
+; 32-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
+; 32: c.ule.s $[[F2]], $[[F3]]
+; 32: movf.d $f14, $f12, $fcc0
+; 32: mov.d $f0, $f14
+
+; 32R2-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R2-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
+; 32R2: c.ule.s $[[F2]], $[[F3]]
+; 32R2: movf.d $f14, $f12, $fcc0
+; 32R2: mov.d $f0, $f14
+
+; 32R6-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R6-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
+; 32R6: cmp.lt.s $[[CC:f0]], $[[F3]], $[[F2]]
+; 32R6: sel.d $[[CC]], $f14, $f12
+
+; 64: c.ule.s $f14, $f15
+; 64: movf.d $f13, $f12, $fcc0
+; 64: mov.d $f0, $f13
+
+; 64R2: c.ule.s $f14, $f15
+; 64R2: movf.d $f13, $f12, $fcc0
+; 64R2: mov.d $f0, $f13
+
+; 64R6: cmp.lt.s $[[CC:f0]], $f15, $f14
+; 64R6: sel.d $[[CC]], $f13, $f12
+
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
}
-define double @sel6(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
+define double @f64_fcmp_oeq_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK: c.eq.d
-; CHECK: movt.d
+; ALL-LABEL: f64_fcmp_oeq_f64_val:
+
+; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32: c.eq.d $[[F2]], $[[F3]]
+; 32: movt.d $f14, $f12, $fcc0
+; 32: mov.d $f0, $f14
+
+; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R2: c.eq.d $[[F2]], $[[F3]]
+; 32R2: movt.d $f14, $f12, $fcc0
+; 32R2: mov.d $f0, $f14
+
+; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R6: cmp.eq.d $[[CC:f0]], $[[F2]], $[[F3]]
+; 32R6: sel.d $[[CC]], $f14, $f12
+
+; 64: c.eq.d $f14, $f15
+; 64: movt.d $f13, $f12, $fcc0
+; 64: mov.d $f0, $f13
+
+; 64R2: c.eq.d $f14, $f15
+; 64R2: movt.d $f13, $f12, $fcc0
+; 64R2: mov.d $f0, $f13
+
+; 64R6: cmp.eq.d $[[CC:f0]], $f14, $f15
+; 64R6: sel.d $[[CC]], $f13, $f12
+
%cmp = fcmp oeq double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
}
-define double @sel7(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
+define double @f64_fcmp_olt_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK: c.olt.d
-; CHECK: movt.d
+; ALL-LABEL: f64_fcmp_olt_f64_val:
+
+; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32: c.olt.d $[[F2]], $[[F3]]
+; 32: movt.d $f14, $f12, $fcc0
+; 32: mov.d $f0, $f14
+
+; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R2: c.olt.d $[[F2]], $[[F3]]
+; 32R2: movt.d $f14, $f12, $fcc0
+; 32R2: mov.d $f0, $f14
+
+; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R6: cmp.lt.d $[[CC:f0]], $[[F2]], $[[F3]]
+; 32R6: sel.d $[[CC]], $f14, $f12
+
+; 64: c.olt.d $f14, $f15
+; 64: movt.d $f13, $f12, $fcc0
+; 64: mov.d $f0, $f13
+
+; 64R2: c.olt.d $f14, $f15
+; 64R2: movt.d $f13, $f12, $fcc0
+; 64R2: mov.d $f0, $f13
+
+; 64R6: cmp.lt.d $[[CC:f0]], $f14, $f15
+; 64R6: sel.d $[[CC]], $f13, $f12
+
%cmp = fcmp olt double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
}
-define double @sel8(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
+define double @f64_fcmp_ogt_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK: c.ule.d
-; CHECK: movf.d
+; ALL-LABEL: f64_fcmp_ogt_f64_val:
+
+; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32: c.ule.d $[[F2]], $[[F3]]
+; 32: movf.d $f14, $f12, $fcc0
+; 32: mov.d $f0, $f14
+
+; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R2: c.ule.d $[[F2]], $[[F3]]
+; 32R2: movf.d $f14, $f12, $fcc0
+; 32R2: mov.d $f0, $f14
+
+; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R6: cmp.lt.d $[[CC:f0]], $[[F3]], $[[F2]]
+; 32R6: sel.d $[[CC]], $f14, $f12
+
+; 64: c.ule.d $f14, $f15
+; 64: movf.d $f13, $f12, $fcc0
+; 64: mov.d $f0, $f13
+
+; 64R2: c.ule.d $f14, $f15
+; 64R2: movf.d $f13, $f12, $fcc0
+; 64R2: mov.d $f0, $f13
+
+; 64R6: cmp.lt.d $[[CC:f0]], $f15, $f14
+; 64R6: sel.d $[[CC]], $f13, $f12
+
%cmp = fcmp ogt double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
}
-define float @sel8_1(float %f0, float %f1, double %f2, double %f3) nounwind readnone {
+define float @f64_fcmp_ogt_f32_val(float %f0, float %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK: c.ule.d
-; CHECK: movf.s
+; ALL-LABEL: f64_fcmp_ogt_f32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[1-3]*[02468]+]]
+; 32-DAG: mtc1 $7, $[[F2H:f[1-3]*[13579]+]]
+; 32-DAG: ldc1 $[[F3:f[0-9]+]], 16($sp)
+; 32: c.ule.d $[[F2]], $[[F3]]
+; 32: movf.s $f14, $f12, $fcc0
+; 32: mov.s $f0, $f14
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mthc1 $7, $[[F2]]
+; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 16($sp)
+; 32R2: c.ule.d $[[F2]], $[[F3]]
+; 32R2: movf.s $f14, $f12, $fcc0
+; 32R2: mov.s $f0, $f14
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mthc1 $7, $[[F2]]
+; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 16($sp)
+; 32R6: cmp.lt.d $[[CC:f0]], $[[F3]], $[[F2]]
+; 32R6: sel.s $[[CC]], $f14, $f12
+
+; 64: c.ule.d $f14, $f15
+; 64: movf.s $f13, $f12, $fcc0
+; 64: mov.s $f0, $f13
+
+; 64R2: c.ule.d $f14, $f15
+; 64R2: movf.s $f13, $f12, $fcc0
+; 64R2: mov.s $f0, $f13
+
+; 64R6: cmp.lt.d $[[CC:f0]], $f15, $f14
+; 64R6: sel.s $[[CC]], $f13, $f12
+
%cmp = fcmp ogt double %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
}
-define i32 @sel9(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
+define i32 @f32_fcmp_oeq_i32_val(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.eq.s
-; CHECK: movt
+; ALL-LABEL: f32_fcmp_oeq_i32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.eq.s $[[F2]], $[[F3]]
+; 32: movt $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.eq.s $[[F2]], $[[F3]]
+; 32R2: movt $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.eq.s $[[CC:f[0-9]+]], $[[F2]], $[[F3]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64: c.eq.s $f14, $f15
+; 64: movt $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2: c.eq.s $f14, $f15
+; 64R2: movt $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6: cmp.eq.s $[[CC:f[0-9]+]], $f14, $f15
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%cmp = fcmp oeq float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
}
-define i32 @sel10(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
+define i32 @f32_fcmp_olt_i32_val(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.olt.s
-; CHECK: movt
+; ALL-LABEL: f32_fcmp_olt_i32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.olt.s $[[F2]], $[[F3]]
+; 32: movt $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.olt.s $[[F2]], $[[F3]]
+; 32R2: movt $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.lt.s $[[CC:f[0-9]+]], $[[F2]], $[[F3]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64: c.olt.s $f14, $f15
+; 64: movt $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2: c.olt.s $f14, $f15
+; 64R2: movt $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6: cmp.lt.s $[[CC:f[0-9]+]], $f14, $f15
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
%cmp = fcmp olt float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
}
-define i32 @sel11(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
+define i32 @f32_fcmp_ogt_i32_val(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.ule.s
-; CHECK: movf
+; ALL-LABEL: f32_fcmp_ogt_i32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.ule.s $[[F2]], $[[F3]]
+; 32: movf $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.ule.s $[[F2]], $[[F3]]
+; 32R2: movf $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.lt.s $[[CC:f[0-9]+]], $[[F3]], $[[F2]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64: c.ule.s $f14, $f15
+; 64: movf $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2: c.ule.s $f14, $f15
+; 64R2: movf $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6: cmp.lt.s $[[CC:f[0-9]+]], $f15, $f14
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
}
-define i32 @sel12(i32 %f0, i32 %f1) nounwind readonly {
+define i32 @f64_fcmp_oeq_i32_val(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK: c.eq.d
-; CHECK: movt
+; ALL-LABEL: f64_fcmp_oeq_i32_val:
+
+; 32-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32: c.eq.d $[[TMP]], $[[TMP1]]
+; 32: movt $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R2-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R2-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R2-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R2: c.eq.d $[[TMP]], $[[TMP1]]
+; 32R2: movt $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R6-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R6-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R6-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R6: cmp.eq.d $[[CC:f[0-9]+]], $[[TMP]], $[[TMP1]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_oeq_i32_val)))
+; 64-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64: c.eq.d $[[TMP]], $[[TMP1]]
+; 64: movt $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_oeq_i32_val)))
+; 64R2-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R2-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R2-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R2: c.eq.d $[[TMP]], $[[TMP1]]
+; 64R2: movt $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_oeq_i32_val)))
+; 64R6-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R6-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R6-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R6: cmp.eq.d $[[CC:f[0-9]+]], $[[TMP]], $[[TMP1]]
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%tmp = load double* @d2, align 8
%tmp1 = load double* @d3, align 8
%cmp = fcmp oeq double %tmp, %tmp1
@@ -137,10 +707,76 @@ entry:
ret i32 %cond
}
-define i32 @sel13(i32 %f0, i32 %f1) nounwind readonly {
+define i32 @f64_fcmp_olt_i32_val(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK: c.olt.d
-; CHECK: movt
+; ALL-LABEL: f64_fcmp_olt_i32_val:
+
+; 32-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32: c.olt.d $[[TMP]], $[[TMP1]]
+; 32: movt $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R2-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R2-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R2-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R2: c.olt.d $[[TMP]], $[[TMP1]]
+; 32R2: movt $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R6-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R6-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R6-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R6: cmp.lt.d $[[CC:f[0-9]+]], $[[TMP]], $[[TMP1]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_olt_i32_val)))
+; 64-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64: c.olt.d $[[TMP]], $[[TMP1]]
+; 64: movt $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_olt_i32_val)))
+; 64R2-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R2-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R2-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R2: c.olt.d $[[TMP]], $[[TMP1]]
+; 64R2: movt $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_olt_i32_val)))
+; 64R6-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R6-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R6-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R6: cmp.lt.d $[[CC:f[0-9]+]], $[[TMP]], $[[TMP1]]
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%tmp = load double* @d2, align 8
%tmp1 = load double* @d3, align 8
%cmp = fcmp olt double %tmp, %tmp1
@@ -148,10 +784,76 @@ entry:
ret i32 %cond
}
-define i32 @sel14(i32 %f0, i32 %f1) nounwind readonly {
+define i32 @f64_fcmp_ogt_i32_val(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK: c.ule.d
-; CHECK: movf
+; ALL-LABEL: f64_fcmp_ogt_i32_val:
+
+; 32-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32: c.ule.d $[[TMP]], $[[TMP1]]
+; 32: movf $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R2-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R2-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R2-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R2: c.ule.d $[[TMP]], $[[TMP1]]
+; 32R2: movf $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R6-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R6-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R6-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R6: cmp.lt.d $[[CC:f[0-9]+]], $[[TMP1]], $[[TMP]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_ogt_i32_val)))
+; 64-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64: c.ule.d $[[TMP]], $[[TMP1]]
+; 64: movf $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_ogt_i32_val)))
+; 64R2-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R2-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R2-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R2: c.ule.d $[[TMP]], $[[TMP1]]
+; 64R2: movf $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_ogt_i32_val)))
+; 64R6-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R6-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R6-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R6: cmp.lt.d $[[CC:f[0-9]+]], $[[TMP1]], $[[TMP]]
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%tmp = load double* @d2, align 8
%tmp1 = load double* @d3, align 8
%cmp = fcmp ogt double %tmp, %tmp1
diff --git a/test/CodeGen/Mips/selectcc.ll b/test/CodeGen/Mips/selectcc.ll
index aeef60e..9790a0a 100644
--- a/test/CodeGen/Mips/selectcc.ll
+++ b/test/CodeGen/Mips/selectcc.ll
@@ -1,5 +1,7 @@
-; RUN: llc -march=mipsel < %s
-; RUN: llc -march=mipsel -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
+; RUN: llc -march=mipsel -mcpu=mips32 < %s
+; RUN: llc -march=mipsel -mcpu=mips32 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
@gf0 = external global float
@gf1 = external global float
@@ -16,13 +18,11 @@ entry:
; SOURCE-SCHED: lw
; SOURCE-SCHED: lui
; SOURCE-SCHED: sw
-; SOURCE-SCHED: addiu
-; SOURCE-SCHED: addiu
-; SOURCE-SCHED: c.olt.s
-; SOURCE-SCHED: movt
+; SOURCE-SCHED: lw
+; SOURCE-SCHED: lwc1
; SOURCE-SCHED: mtc1
+; SOURCE-SCHED: c.olt.s
; SOURCE-SCHED: jr
-
store float 0.000000e+00, float* @gf0, align 4
store float 1.000000e+00, float* @gf1, align 4
%cmp = fcmp olt float %a, %b
diff --git a/test/CodeGen/Mips/tls-alias.ll b/test/CodeGen/Mips/tls-alias.ll
index 80fbe87..b61f84e 100644
--- a/test/CodeGen/Mips/tls-alias.ll
+++ b/test/CodeGen/Mips/tls-alias.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=mipsel -relocation-model=pic -disable-mips-delay-filler < %s | FileCheck %s
@foo = thread_local global i32 42
-@bar = hidden alias i32* @foo
+@bar = hidden thread_local alias i32* @foo
define i32* @zed() {
; CHECK-DAG: __tls_get_addr
diff --git a/test/CodeGen/Mips/zeroreg.ll b/test/CodeGen/Mips/zeroreg.ll
index e0e93e2..a1b6cb0 100644
--- a/test/CodeGen/Mips/zeroreg.ll
+++ b/test/CodeGen/Mips/zeroreg.ll
@@ -1,21 +1,109 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32R6
+; RUN: llc < %s -march=mipsel -mcpu=mips4 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=64R6
@g1 = external global i32
-define i32 @foo0(i32 %s) nounwind readonly {
+define i32 @sel_icmp_nez_i32_z0(i32 %s) nounwind readonly {
entry:
-; CHECK: movn ${{[0-9]+}}, $zero
+; ALL-LABEL: sel_icmp_nez_i32_z0:
+
+; 32-CMOV: lw $2, 0(${{[0-9]+}})
+; 32-CMOV: movn $2, $zero, $4
+
+; 32R6: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6: seleqz $2, $[[R0]], $4
+
+; 64-CMOV: lw $2, 0(${{[0-9]+}})
+; 64-CMOV: movn $2, $zero, $4
+
+; 64R6: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 64R6: seleqz $2, $[[R0]], $4
+
%tobool = icmp ne i32 %s, 0
%0 = load i32* @g1, align 4
%cond = select i1 %tobool, i32 0, i32 %0
ret i32 %cond
}
-define i32 @foo1(i32 %s) nounwind readonly {
+define i32 @sel_icmp_nez_i32_z1(i32 %s) nounwind readonly {
entry:
-; CHECK: movz ${{[0-9]+}}, $zero
+; ALL-LABEL: sel_icmp_nez_i32_z1:
+
+; 32-CMOV: lw $2, 0(${{[0-9]+}})
+; 32-CMOV: movz $2, $zero, $4
+
+; 32R6: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6: selnez $2, $[[R0]], $4
+
+; 64-CMOV: lw $2, 0(${{[0-9]+}})
+; 64-CMOV: movz $2, $zero, $4
+
+; 64R6: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 64R6: selnez $2, $[[R0]], $4
+
%tobool = icmp ne i32 %s, 0
%0 = load i32* @g1, align 4
%cond = select i1 %tobool, i32 %0, i32 0
ret i32 %cond
}
+
+@g2 = external global i64
+
+define i64 @sel_icmp_nez_i64_z0(i64 %s) nounwind readonly {
+entry:
+; ALL-LABEL: sel_icmp_nez_i64_z0:
+
+; 32-CMOV-DAG: lw $[[R0:2]], 0(${{[0-9]+}})
+; 32-CMOV-DAG: lw $[[R1:3]], 4(${{[0-9]+}})
+; 32-CMOV-DAG: movn $[[R0]], $zero, $4
+; 32-CMOV-DAG: movn $[[R1]], $zero, $4
+
+; 32R6-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-DAG: or $[[CC:[0-9]+]], $4, $5
+; 32R6-DAG: seleqz $2, $[[R0]], $[[CC]]
+; 32R6-DAG: seleqz $3, $[[R1]], $[[CC]]
+
+; 64-CMOV: ld $2, 0(${{[0-9]+}})
+; 64-CMOV: movn $2, $zero, $4
+
+; 64R6: ld $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 64R6: seleqz $2, $[[R0]], $4
+
+ %tobool = icmp ne i64 %s, 0
+ %0 = load i64* @g2, align 4
+ %cond = select i1 %tobool, i64 0, i64 %0
+ ret i64 %cond
+}
+
+define i64 @sel_icmp_nez_i64_z1(i64 %s) nounwind readonly {
+entry:
+; ALL-LABEL: sel_icmp_nez_i64_z1:
+
+; 32-CMOV-DAG: lw $[[R0:2]], 0(${{[0-9]+}})
+; 32-CMOV-DAG: lw $[[R1:3]], 4(${{[0-9]+}})
+; 32-CMOV-DAG: movz $[[R0]], $zero, $4
+; 32-CMOV-DAG: movz $[[R1]], $zero, $4
+
+; 32R6-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-DAG: or $[[CC:[0-9]+]], $4, $5
+; 32R6-DAG: selnez $2, $[[R0]], $[[CC]]
+; 32R6-DAG: selnez $3, $[[R1]], $[[CC]]
+
+; 64-CMOV: ld $2, 0(${{[0-9]+}})
+; 64-CMOV: movz $2, $zero, $4
+
+; 64R6: ld $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 64R6: selnez $2, $[[R0]], $4
+
+ %tobool = icmp ne i64 %s, 0
+ %0 = load i64* @g2, align 4
+ %cond = select i1 %tobool, i64 %0, i64 0
+ ret i64 %cond
+}
diff --git a/test/CodeGen/NVPTX/access-non-generic.ll b/test/CodeGen/NVPTX/access-non-generic.ll
index 0622aa3..c225abf 100644
--- a/test/CodeGen/NVPTX/access-non-generic.ll
+++ b/test/CodeGen/NVPTX/access-non-generic.ll
@@ -74,13 +74,13 @@ define float @ld_st_shared_f32(i32 %i, float %v) {
ret float %sum5
}
-; Verifies nvptx-favor-non-generic keeps addrspacecasts between pointers of
-; different element types.
+; When hoisting an addrspacecast between different pointer types, replace the
+; addrspacecast with a bitcast.
define i32 @ld_int_from_float() {
; IR-LABEL: @ld_int_from_float
-; IR: addrspacecast
+; IR: load i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*)
; PTX-LABEL: ld_int_from_float(
-; PTX: cvta.shared.u{{(32|64)}}
+; PTX: ld.shared.u{{(32|64)}}
%1 = load i32* addrspacecast(float addrspace(3)* @scalar to i32*), align 4
ret i32 %1
}
diff --git a/test/CodeGen/NVPTX/arg-lowering.ll b/test/CodeGen/NVPTX/arg-lowering.ll
new file mode 100644
index 0000000..f7b8a14
--- /dev/null
+++ b/test/CodeGen/NVPTX/arg-lowering.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: .visible .func (.param .align 16 .b8 func_retval0[16]) foo0(
+; CHECK: .param .align 4 .b8 foo0_param_0[8]
+define <4 x float> @foo0({float, float} %arg0) {
+ ret <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>
+}
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[8]) foo1(
+; CHECK: .param .align 8 .b8 foo1_param_0[16]
+define <2 x float> @foo1({float, float, i64} %arg0) {
+ ret <2 x float> <float 1.0, float 1.0>
+}
diff --git a/test/CodeGen/NVPTX/atomics.ll b/test/CodeGen/NVPTX/atomics.ll
new file mode 100644
index 0000000..10ab73d
--- /dev/null
+++ b/test/CodeGen/NVPTX/atomics.ll
@@ -0,0 +1,141 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+; CHECK: atom0
+define i32 @atom0(i32* %addr, i32 %val) {
+; CHECK: atom.add.u32
+ %ret = atomicrmw add i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom1
+define i64 @atom1(i64* %addr, i64 %val) {
+; CHECK: atom.add.u64
+ %ret = atomicrmw add i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK: atom2
+define i32 @atom2(i32* %subr, i32 %val) {
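+; PTX has no atomic subtract; atomicrmw sub is expected to be lowered to an
+; atomic add of the negated operand, hence the neg.s32 before atom.add.u32.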
+; CHECK: neg.s32
+; CHECK: atom.add.u32
+ %ret = atomicrmw sub i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom3
+define i64 @atom3(i64* %subr, i64 %val) {
+; CHECK: neg.s64
+; CHECK: atom.add.u64
+ %ret = atomicrmw sub i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK: atom4
+define i32 @atom4(i32* %subr, i32 %val) {
+; CHECK: atom.and.b32
+ %ret = atomicrmw and i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom5
+define i64 @atom5(i64* %subr, i64 %val) {
+; CHECK: atom.and.b64
+ %ret = atomicrmw and i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+;; NAND not yet supported
+;define i32 @atom6(i32* %subr, i32 %val) {
+; %ret = atomicrmw nand i32* %subr, i32 %val seq_cst
+; ret i32 %ret
+;}
+
+;define i64 @atom7(i64* %subr, i64 %val) {
+; %ret = atomicrmw nand i64* %subr, i64 %val seq_cst
+; ret i64 %ret
+;}
+
+; CHECK: atom8
+define i32 @atom8(i32* %subr, i32 %val) {
+; CHECK: atom.or.b32
+ %ret = atomicrmw or i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom9
+define i64 @atom9(i64* %subr, i64 %val) {
+; CHECK: atom.or.b64
+ %ret = atomicrmw or i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK: atom10
+define i32 @atom10(i32* %subr, i32 %val) {
+; CHECK: atom.xor.b32
+ %ret = atomicrmw xor i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom11
+define i64 @atom11(i64* %subr, i64 %val) {
+; CHECK: atom.xor.b64
+ %ret = atomicrmw xor i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK: atom12
+define i32 @atom12(i32* %subr, i32 %val) {
+; CHECK: atom.max.s32
+ %ret = atomicrmw max i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom13
+define i64 @atom13(i64* %subr, i64 %val) {
+; CHECK: atom.max.s64
+ %ret = atomicrmw max i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK: atom14
+define i32 @atom14(i32* %subr, i32 %val) {
+; CHECK: atom.min.s32
+ %ret = atomicrmw min i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom15
+define i64 @atom15(i64* %subr, i64 %val) {
+; CHECK: atom.min.s64
+ %ret = atomicrmw min i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK: atom16
+define i32 @atom16(i32* %subr, i32 %val) {
+; CHECK: atom.max.u32
+ %ret = atomicrmw umax i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom17
+define i64 @atom17(i64* %subr, i64 %val) {
+; CHECK: atom.max.u64
+ %ret = atomicrmw umax i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK: atom18
+define i32 @atom18(i32* %subr, i32 %val) {
+; CHECK: atom.min.u32
+ %ret = atomicrmw umin i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK: atom19
+define i64 @atom19(i64* %subr, i64 %val) {
+; CHECK: atom.min.u64
+ %ret = atomicrmw umin i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
diff --git a/test/CodeGen/NVPTX/bfe.ll b/test/CodeGen/NVPTX/bfe.ll
new file mode 100644
index 0000000..2e816fe
--- /dev/null
+++ b/test/CodeGen/NVPTX/bfe.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
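+; These check that a right shift followed by a mask of the low bits is folded
+; into a single bfe.u32 (bit-field extract). Its last two operands are the
+; start bit and the field width, e.g. (%a >> 4) & 15 becomes "bfe.u32 ..., 4, 4".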
+; CHECK: bfe0
+define i32 @bfe0(i32 %a) {
+; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 4, 4
+; CHECK-NOT: shr
+; CHECK-NOT: and
+ %val0 = ashr i32 %a, 4
+ %val1 = and i32 %val0, 15
+ ret i32 %val1
+}
+
+; CHECK: bfe1
+define i32 @bfe1(i32 %a) {
+; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 3, 3
+; CHECK-NOT: shr
+; CHECK-NOT: and
+ %val0 = ashr i32 %a, 3
+ %val1 = and i32 %val0, 7
+ ret i32 %val1
+}
+
+; CHECK: bfe2
+define i32 @bfe2(i32 %a) {
+; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 5, 3
+; CHECK-NOT: shr
+; CHECK-NOT: and
+ %val0 = ashr i32 %a, 5
+ %val1 = and i32 %val0, 7
+ ret i32 %val1
+}
diff --git a/test/CodeGen/NVPTX/envreg.ll b/test/CodeGen/NVPTX/envreg.ll
new file mode 100644
index 0000000..a341b49
--- /dev/null
+++ b/test/CodeGen/NVPTX/envreg.ll
@@ -0,0 +1,139 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg0()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg1()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg2()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg3()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg4()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg5()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg6()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg7()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg8()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg9()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg10()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg11()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg12()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg13()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg14()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg15()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg16()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg17()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg18()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg19()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg20()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg21()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg22()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg23()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg24()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg25()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg26()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg27()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg28()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg29()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg30()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg31()
+
+
+; CHECK: foo
+define i32 @foo() {
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg0
+ %val0 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg0()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg1
+ %val1 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg1()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg2
+ %val2 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg2()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg3
+ %val3 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg3()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg4
+ %val4 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg4()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg5
+ %val5 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg5()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg6
+ %val6 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg6()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg7
+ %val7 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg7()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg8
+ %val8 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg8()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg9
+ %val9 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg9()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg10
+ %val10 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg10()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg11
+ %val11 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg11()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg12
+ %val12 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg12()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg13
+ %val13 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg13()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg14
+ %val14 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg14()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg15
+ %val15 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg15()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg16
+ %val16 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg16()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg17
+ %val17 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg17()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg18
+ %val18 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg18()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg19
+ %val19 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg19()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg20
+ %val20 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg20()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg21
+ %val21 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg21()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg22
+ %val22 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg22()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg23
+ %val23 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg23()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg24
+ %val24 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg24()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg25
+ %val25 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg25()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg26
+ %val26 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg26()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg27
+ %val27 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg27()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg28
+ %val28 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg28()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg29
+ %val29 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg29()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg30
+ %val30 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg30()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg31
+ %val31 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg31()
+
+
+ %ret0 = add i32 %val0, %val1
+ %ret1 = add i32 %ret0, %val2
+ %ret2 = add i32 %ret1, %val3
+ %ret3 = add i32 %ret2, %val4
+ %ret4 = add i32 %ret3, %val5
+ %ret5 = add i32 %ret4, %val6
+ %ret6 = add i32 %ret5, %val7
+ %ret7 = add i32 %ret6, %val8
+ %ret8 = add i32 %ret7, %val9
+ %ret9 = add i32 %ret8, %val10
+ %ret10 = add i32 %ret9, %val11
+ %ret11 = add i32 %ret10, %val12
+ %ret12 = add i32 %ret11, %val13
+ %ret13 = add i32 %ret12, %val14
+ %ret14 = add i32 %ret13, %val15
+ %ret15 = add i32 %ret14, %val16
+ %ret16 = add i32 %ret15, %val17
+ %ret17 = add i32 %ret16, %val18
+ %ret18 = add i32 %ret17, %val19
+ %ret19 = add i32 %ret18, %val20
+ %ret20 = add i32 %ret19, %val21
+ %ret21 = add i32 %ret20, %val22
+ %ret22 = add i32 %ret21, %val23
+ %ret23 = add i32 %ret22, %val24
+ %ret24 = add i32 %ret23, %val25
+ %ret25 = add i32 %ret24, %val26
+ %ret26 = add i32 %ret25, %val27
+ %ret27 = add i32 %ret26, %val28
+ %ret28 = add i32 %ret27, %val29
+ %ret29 = add i32 %ret28, %val30
+ %ret30 = add i32 %ret29, %val31
+
+ ret i32 %ret30
+}
diff --git a/test/CodeGen/NVPTX/gvar-init.ll b/test/CodeGen/NVPTX/gvar-init.ll
new file mode 100644
index 0000000..8c95942
--- /dev/null
+++ b/test/CodeGen/NVPTX/gvar-init.ll
@@ -0,0 +1,5 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; Error out if an initializer is given for an address space that does not support initializers.
+; XFAIL: *
+@g0 = addrspace(3) global i32 42
diff --git a/test/CodeGen/NVPTX/imad.ll b/test/CodeGen/NVPTX/imad.ll
new file mode 100644
index 0000000..67421c7
--- /dev/null
+++ b/test/CodeGen/NVPTX/imad.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: imad
+define i32 @imad(i32 %a, i32 %b, i32 %c) {
+; CHECK: mad.lo.s32
+ %val0 = mul i32 %a, %b
+ %val1 = add i32 %val0, %c
+ ret i32 %val1
+}
diff --git a/test/CodeGen/NVPTX/inline-asm.ll b/test/CodeGen/NVPTX/inline-asm.ll
index d76eb42..6f0578d 100644
--- a/test/CodeGen/NVPTX/inline-asm.ll
+++ b/test/CodeGen/NVPTX/inline-asm.ll
@@ -7,3 +7,10 @@ entry:
%0 = call float asm "ex2.approx.ftz.f32 $0, $1;", "=f,f"(float %x)
ret float %0
}
+
+define i32 @foo(i1 signext %cond, i32 %a, i32 %b) #0 {
+entry:
+; CHECK: selp.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %p{{[0-9]+}}
+ %0 = tail call i32 asm "selp.b32 $0, $1, $2, $3;", "=r,r,r,b"(i32 %a, i32 %b, i1 %cond)
+ ret i32 %0
+}
diff --git a/test/CodeGen/NVPTX/isspacep.ll b/test/CodeGen/NVPTX/isspacep.ll
new file mode 100644
index 0000000..47fa7a6
--- /dev/null
+++ b/test/CodeGen/NVPTX/isspacep.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+declare i1 @llvm.nvvm.isspacep.const(i8*) readnone noinline
+declare i1 @llvm.nvvm.isspacep.global(i8*) readnone noinline
+declare i1 @llvm.nvvm.isspacep.local(i8*) readnone noinline
+declare i1 @llvm.nvvm.isspacep.shared(i8*) readnone noinline
+
+; CHECK: is_const
+define i1 @is_const(i8* %addr) {
+; CHECK: isspacep.const
+ %v = tail call i1 @llvm.nvvm.isspacep.const(i8* %addr)
+ ret i1 %v
+}
+
+; CHECK: is_global
+define i1 @is_global(i8* %addr) {
+; CHECK: isspacep.global
+ %v = tail call i1 @llvm.nvvm.isspacep.global(i8* %addr)
+ ret i1 %v
+}
+
+; CHECK: is_local
+define i1 @is_local(i8* %addr) {
+; CHECK: isspacep.local
+ %v = tail call i1 @llvm.nvvm.isspacep.local(i8* %addr)
+ ret i1 %v
+}
+
+; CHECK: is_shared
+define i1 @is_shared(i8* %addr) {
+; CHECK: isspacep.shared
+ %v = tail call i1 @llvm.nvvm.isspacep.shared(i8* %addr)
+ ret i1 %v
+}
+
diff --git a/test/CodeGen/NVPTX/ldu-i8.ll b/test/CodeGen/NVPTX/ldu-i8.ll
index 81a82b2..9cc6675 100644
--- a/test/CodeGen/NVPTX/ldu-i8.ll
+++ b/test/CodeGen/NVPTX/ldu-i8.ll
@@ -2,13 +2,15 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
-declare i8 @llvm.nvvm.ldu.global.i.i8(i8*)
+declare i8 @llvm.nvvm.ldu.global.i.i8.p0i8(i8*)
define i8 @foo(i8* %a) {
; Ensure we properly truncate off the high-order 24 bits
; CHECK: ldu.global.u8
; CHECK: cvt.u32.u16
; CHECK: and.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, 255
- %val = tail call i8 @llvm.nvvm.ldu.global.i.i8(i8* %a)
+ %val = tail call i8 @llvm.nvvm.ldu.global.i.i8.p0i8(i8* %a), !align !0
ret i8 %val
}
+
+!0 = metadata !{i32 4}
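The ldu/ldg updates in this and the neighboring NVPTX tests are one rename: the intrinsics are now overloaded on the pointer type, so the mangled name carries a .p<addrspace><elemty> suffix (.p0i8 for a generic i8*, .p1i8 for addrspace(1)), and the pointer's known alignment rides on the call as !align metadata. A minimal sketch of the new form, assuming the same sm_20 RUN line as the tests above:

declare i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)*)

define i32 @ldu_sketch(i32 addrspace(1)* %p) {
  ; !align names the guaranteed alignment of %p, in bytes
  %v = tail call i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %p), !align !0
  ret i32 %v
}

!0 = metadata !{i32 4}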
diff --git a/test/CodeGen/NVPTX/ldu-ldg.ll b/test/CodeGen/NVPTX/ldu-ldg.ll
new file mode 100644
index 0000000..3b0619f
--- /dev/null
+++ b/test/CodeGen/NVPTX/ldu-ldg.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+declare i8 @llvm.nvvm.ldu.global.i.i8.p1i8(i8 addrspace(1)* %ptr)
+declare i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %ptr)
+declare i8 @llvm.nvvm.ldg.global.i.i8.p1i8(i8 addrspace(1)* %ptr)
+declare i32 @llvm.nvvm.ldg.global.i.i32.p1i32(i32 addrspace(1)* %ptr)
+
+
+; CHECK: func0
+define i8 @func0(i8 addrspace(1)* %ptr) {
+; CHECK: ldu.global.u8
+ %val = tail call i8 @llvm.nvvm.ldu.global.i.i8.p1i8(i8 addrspace(1)* %ptr), !align !0
+ ret i8 %val
+}
+
+; CHECK: func1
+define i32 @func1(i32 addrspace(1)* %ptr) {
+; CHECK: ldu.global.u32
+ %val = tail call i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %ptr), !align !0
+ ret i32 %val
+}
+
+; CHECK: func2
+define i8 @func2(i8 addrspace(1)* %ptr) {
+; CHECK: ld.global.nc.u8
+ %val = tail call i8 @llvm.nvvm.ldg.global.i.i8.p1i8(i8 addrspace(1)* %ptr), !align !0
+ ret i8 %val
+}
+
+; CHECK: func3
+define i32 @func3(i32 addrspace(1)* %ptr) {
+; CHECK: ld.global.nc.u32
+ %val = tail call i32 @llvm.nvvm.ldg.global.i.i32.p1i32(i32 addrspace(1)* %ptr), !align !0
+ ret i32 %val
+}
+
+
+
+!0 = metadata !{i32 4}
diff --git a/test/CodeGen/NVPTX/ldu-reg-plus-offset.ll b/test/CodeGen/NVPTX/ldu-reg-plus-offset.ll
index 26cadc4..55707ea 100644
--- a/test/CodeGen/NVPTX/ldu-reg-plus-offset.ll
+++ b/test/CodeGen/NVPTX/ldu-reg-plus-offset.ll
@@ -7,9 +7,9 @@ define void @reg_plus_offset(i32* %a) {
; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+32];
; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+36];
%p2 = getelementptr i32* %a, i32 8
- %t1 = call i32 @llvm.nvvm.ldu.global.i.i32(i32* %p2), !align !1
+ %t1 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p2), !align !1
%p3 = getelementptr i32* %a, i32 9
- %t2 = call i32 @llvm.nvvm.ldu.global.i.i32(i32* %p3), !align !1
+ %t2 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p3), !align !1
%t3 = mul i32 %t1, %t2
store i32 %t3, i32* %a
ret void
@@ -17,5 +17,5 @@ define void @reg_plus_offset(i32* %a) {
!1 = metadata !{ i32 4 }
-declare i32 @llvm.nvvm.ldu.global.i.i32(i32*)
+declare i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32*)
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
diff --git a/test/CodeGen/NVPTX/lit.local.cfg b/test/CodeGen/NVPTX/lit.local.cfg
index 85cf8c2..2cb98eb 100644
--- a/test/CodeGen/NVPTX/lit.local.cfg
+++ b/test/CodeGen/NVPTX/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'NVPTX' in targets:
+if not 'NVPTX' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/NVPTX/managed.ll b/test/CodeGen/NVPTX/managed.ll
new file mode 100644
index 0000000..4d7e781
--- /dev/null
+++ b/test/CodeGen/NVPTX/managed.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+; CHECK: .visible .global .align 4 .u32 device_g;
+@device_g = addrspace(1) global i32 zeroinitializer
+; CHECK: .visible .global .attribute(.managed) .align 4 .u32 managed_g;
+@managed_g = addrspace(1) global i32 zeroinitializer
+
+
+!nvvm.annotations = !{!0}
+!0 = metadata !{i32 addrspace(1)* @managed_g, metadata !"managed", i32 1}
diff --git a/test/CodeGen/NVPTX/mulwide.ll b/test/CodeGen/NVPTX/mulwide.ll
new file mode 100644
index 0000000..927946c
--- /dev/null
+++ b/test/CodeGen/NVPTX/mulwide.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: mulwide16
+define i32 @mulwide16(i16 %a, i16 %b) {
+; CHECK: mul.wide.s16
+ %val0 = sext i16 %a to i32
+ %val1 = sext i16 %b to i32
+ %val2 = mul i32 %val0, %val1
+ ret i32 %val2
+}
+
+; CHECK: mulwideu16
+define i32 @mulwideu16(i16 %a, i16 %b) {
+; CHECK: mul.wide.u16
+ %val0 = zext i16 %a to i32
+ %val1 = zext i16 %b to i32
+ %val2 = mul i32 %val0, %val1
+ ret i32 %val2
+}
+
+; CHECK: mulwide32
+define i64 @mulwide32(i32 %a, i32 %b) {
+; CHECK: mul.wide.s32
+ %val0 = sext i32 %a to i64
+ %val1 = sext i32 %b to i64
+ %val2 = mul i64 %val0, %val1
+ ret i64 %val2
+}
+
+; CHECK: mulwideu32
+define i64 @mulwideu32(i32 %a, i32 %b) {
+; CHECK: mul.wide.u32
+ %val0 = zext i32 %a to i64
+ %val1 = zext i32 %b to i64
+ %val2 = mul i64 %val0, %val1
+ ret i64 %val2
+}
diff --git a/test/CodeGen/NVPTX/nvvm-reflect.ll b/test/CodeGen/NVPTX/nvvm-reflect.ll
index 0d02194..21e9c69 100644
--- a/test/CodeGen/NVPTX/nvvm-reflect.ll
+++ b/test/CodeGen/NVPTX/nvvm-reflect.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -S -nvvm-reflect -nvvm-reflect-list USE_MUL=0 -O2 | FileCheck %s --check-prefix=USE_MUL_0
; RUN: opt < %s -S -nvvm-reflect -nvvm-reflect-list USE_MUL=1 -O2 | FileCheck %s --check-prefix=USE_MUL_1
-@str = private addrspace(4) unnamed_addr constant [8 x i8] c"USE_MUL\00"
+@str = private unnamed_addr addrspace(4) constant [8 x i8] c"USE_MUL\00"
declare i32 @__nvvm_reflect(i8*)
declare i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)*)
@@ -32,3 +32,17 @@ exit:
%ret = phi float [%ret1, %use_mul], [%ret2, %use_add]
ret float %ret
}
+
+declare i32 @llvm.nvvm.reflect.p0i8(i8*)
+
+; USE_MUL_0: define i32 @intrinsic
+; USE_MUL_1: define i32 @intrinsic
+define i32 @intrinsic() {
+; USE_MUL_0-NOT: call i32 @llvm.nvvm.reflect
+; USE_MUL_0: ret i32 0
+; USE_MUL_1-NOT: call i32 @llvm.nvvm.reflect
+; USE_MUL_1: ret i32 1
+ %ptr = tail call i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)* getelementptr inbounds ([8 x i8] addrspace(4)* @str, i32 0, i32 0))
+ %reflect = tail call i32 @llvm.nvvm.reflect.p0i8(i8* %ptr)
+ ret i32 %reflect
+}
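The new @intrinsic function checks that a direct call to llvm.nvvm.reflect.p0i8 folds exactly like a call through __nvvm_reflect. As a usage sketch built from the RUN lines above (MYFLAG is a made-up key, not one the tests use):

opt -S -nvvm-reflect -nvvm-reflect-list MYFLAG=1 input.ll -o output.ll

Any reflect query for MYFLAG in input.ll is then replaced by the constant 1; keys absent from the list fold to 0, the pass's default.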
diff --git a/test/CodeGen/NVPTX/rotate.ll b/test/CodeGen/NVPTX/rotate.ll
new file mode 100644
index 0000000..dfc8b4f
--- /dev/null
+++ b/test/CodeGen/NVPTX/rotate.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck --check-prefix=SM20 %s
+; RUN: llc < %s -march=nvptx -mcpu=sm_35 | FileCheck --check-prefix=SM35 %s
+
+
+declare i32 @llvm.nvvm.rotate.b32(i32, i32)
+declare i64 @llvm.nvvm.rotate.b64(i64, i32)
+declare i64 @llvm.nvvm.rotate.right.b64(i64, i32)
+
+; SM20: rotate32
+; SM35: rotate32
+define i32 @rotate32(i32 %a, i32 %b) {
+; SM20: shl.b32
+; SM20: sub.s32
+; SM20: shr.b32
+; SM20: add.u32
+; SM35: shf.l.wrap.b32
+ %val = tail call i32 @llvm.nvvm.rotate.b32(i32 %a, i32 %b)
+ ret i32 %val
+}
+
+; SM20: rotate64
+; SM35: rotate64
+define i64 @rotate64(i64 %a, i32 %b) {
+; SM20: shl.b64
+; SM20: sub.u32
+; SM20: shr.b64
+; SM20: add.u64
+; SM35: shf.l.wrap.b32
+; SM35: shf.l.wrap.b32
+ %val = tail call i64 @llvm.nvvm.rotate.b64(i64 %a, i32 %b)
+ ret i64 %val
+}
+
+; SM20: rotateright64
+; SM35: rotateright64
+define i64 @rotateright64(i64 %a, i32 %b) {
+; SM20: shr.b64
+; SM20: sub.u32
+; SM20: shl.b64
+; SM20: add.u64
+; SM35: shf.r.wrap.b32
+; SM35: shf.r.wrap.b32
+ %val = tail call i64 @llvm.nvvm.rotate.right.b64(i64 %a, i32 %b)
+ ret i64 %val
+}
+
+; SM20: rotl0
+; SM35: rotl0
+define i32 @rotl0(i32 %x) {
+; SM20: shl.b32
+; SM20: shr.b32
+; SM20: add.u32
+; SM35: shf.l.wrap.b32
+ %t0 = shl i32 %x, 8
+ %t1 = lshr i32 %x, 24
+ %t2 = or i32 %t0, %t1
+ ret i32 %t2
+}
diff --git a/test/CodeGen/NVPTX/shift-parts.ll b/test/CodeGen/NVPTX/shift-parts.ll
new file mode 100644
index 0000000..748297c
--- /dev/null
+++ b/test/CodeGen/NVPTX/shift-parts.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: shift_parts_left_128
+define void @shift_parts_left_128(i128* %val, i128* %amtptr) {
+; CHECK: shl.b64
+; CHECK: mov.u32
+; CHECK: sub.s32
+; CHECK: shr.u64
+; CHECK: or.b64
+; CHECK: add.s32
+; CHECK: shl.b64
+; CHECK: setp.gt.s32
+; CHECK: selp.b64
+; CHECK: shl.b64
+ %amt = load i128* %amtptr
+ %a = load i128* %val
+ %val0 = shl i128 %a, %amt
+ store i128 %val0, i128* %val
+ ret void
+}
+
+; CHECK: shift_parts_right_128
+define void @shift_parts_right_128(i128* %val, i128* %amtptr) {
+; CHECK: shr.u64
+; CHECK: sub.s32
+; CHECK: shl.b64
+; CHECK: or.b64
+; CHECK: add.s32
+; CHECK: shr.s64
+; CHECK: setp.gt.s32
+; CHECK: selp.b64
+; CHECK: shr.s64
+ %amt = load i128* %amtptr
+ %a = load i128* %val
+ %val0 = ashr i128 %a, %amt
+ store i128 %val0, i128* %val
+ ret void
+}
diff --git a/test/CodeGen/NVPTX/weak-global.ll b/test/CodeGen/NVPTX/weak-global.ll
new file mode 100644
index 0000000..2bef4c5
--- /dev/null
+++ b/test/CodeGen/NVPTX/weak-global.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: .weak .global .align 4 .u32 g
+@g = common addrspace(1) global i32 zeroinitializer
+
+define i32 @func0() {
+ %val = load i32 addrspace(1)* @g
+ ret i32 %val
+}
diff --git a/test/CodeGen/NVPTX/weak-linkage.ll b/test/CodeGen/NVPTX/weak-linkage.ll
new file mode 100644
index 0000000..7a13357
--- /dev/null
+++ b/test/CodeGen/NVPTX/weak-linkage.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+; CHECK: .weak .func foo
+define weak void @foo() {
+ ret void
+}
+
+; CHECK: .visible .func bar
+define void @bar() {
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/Atomics-32.ll b/test/CodeGen/PowerPC/Atomics-32.ll
index b5c03e2..b7f23b1 100644
--- a/test/CodeGen/PowerPC/Atomics-32.ll
+++ b/test/CodeGen/PowerPC/Atomics-32.ll
@@ -529,63 +529,73 @@ define void @test_compare_and_swap() nounwind {
entry:
%0 = load i8* @uc, align 1
%1 = load i8* @sc, align 1
- %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
+ %pair2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
+ %2 = extractvalue { i8, i1 } %pair2, 0
store i8 %2, i8* @sc, align 1
%3 = load i8* @uc, align 1
%4 = load i8* @sc, align 1
- %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
+ %pair5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
+ %5 = extractvalue { i8, i1 } %pair5, 0
store i8 %5, i8* @uc, align 1
%6 = load i8* @uc, align 1
%7 = zext i8 %6 to i16
%8 = load i8* @sc, align 1
%9 = sext i8 %8 to i16
%10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
+ %pair11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
+ %11 = extractvalue { i16, i1 } %pair11, 0
store i16 %11, i16* @ss, align 2
%12 = load i8* @uc, align 1
%13 = zext i8 %12 to i16
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
%16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
+ %pair17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
+ %17 = extractvalue { i16, i1 } %pair17, 0
store i16 %17, i16* @us, align 2
%18 = load i8* @uc, align 1
%19 = zext i8 %18 to i32
%20 = load i8* @sc, align 1
%21 = sext i8 %20 to i32
%22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
+ %pair23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
+ %23 = extractvalue { i32, i1 } %pair23, 0
store i32 %23, i32* @si, align 4
%24 = load i8* @uc, align 1
%25 = zext i8 %24 to i32
%26 = load i8* @sc, align 1
%27 = sext i8 %26 to i32
%28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
+ %pair29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
+ %29 = extractvalue { i32, i1 } %pair29, 0
store i32 %29, i32* @ui, align 4
%30 = load i8* @uc, align 1
%31 = zext i8 %30 to i32
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i32
%34 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
- %35 = cmpxchg i32* %34, i32 %31, i32 %33 monotonic monotonic
+ %pair35 = cmpxchg i32* %34, i32 %31, i32 %33 monotonic monotonic
+ %35 = extractvalue { i32, i1 } %pair35, 0
store i32 %35, i32* @sl, align 4
%36 = load i8* @uc, align 1
%37 = zext i8 %36 to i32
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i32
%40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
- %41 = cmpxchg i32* %40, i32 %37, i32 %39 monotonic monotonic
+ %pair41 = cmpxchg i32* %40, i32 %37, i32 %39 monotonic monotonic
+ %41 = extractvalue { i32, i1 } %pair41, 0
store i32 %41, i32* @ul, align 4
%42 = load i8* @uc, align 1
%43 = load i8* @sc, align 1
- %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
+ %pair44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
+ %44 = extractvalue { i8, i1 } %pair44, 0
%45 = icmp eq i8 %44, %42
%46 = zext i1 %45 to i32
store i32 %46, i32* @ui, align 4
%47 = load i8* @uc, align 1
%48 = load i8* @sc, align 1
- %49 = cmpxchg i8* @uc, i8 %47, i8 %48 monotonic monotonic
+ %pair49 = cmpxchg i8* @uc, i8 %47, i8 %48 monotonic monotonic
+ %49 = extractvalue { i8, i1 } %pair49, 0
%50 = icmp eq i8 %49, %47
%51 = zext i1 %50 to i32
store i32 %51, i32* @ui, align 4
@@ -594,7 +604,8 @@ entry:
%54 = load i8* @sc, align 1
%55 = sext i8 %54 to i16
%56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %57 = cmpxchg i16* %56, i16 %53, i16 %55 monotonic monotonic
+ %pair57 = cmpxchg i16* %56, i16 %53, i16 %55 monotonic monotonic
+ %57 = extractvalue { i16, i1 } %pair57, 0
%58 = icmp eq i16 %57, %53
%59 = zext i1 %58 to i32
store i32 %59, i32* @ui, align 4
@@ -603,7 +614,8 @@ entry:
%62 = load i8* @sc, align 1
%63 = sext i8 %62 to i16
%64 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %65 = cmpxchg i16* %64, i16 %61, i16 %63 monotonic monotonic
+ %pair65 = cmpxchg i16* %64, i16 %61, i16 %63 monotonic monotonic
+ %65 = extractvalue { i16, i1 } %pair65, 0
%66 = icmp eq i16 %65, %61
%67 = zext i1 %66 to i32
store i32 %67, i32* @ui, align 4
@@ -612,7 +624,8 @@ entry:
%70 = load i8* @sc, align 1
%71 = sext i8 %70 to i32
%72 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %73 = cmpxchg i32* %72, i32 %69, i32 %71 monotonic monotonic
+ %pair73 = cmpxchg i32* %72, i32 %69, i32 %71 monotonic monotonic
+ %73 = extractvalue { i32, i1 } %pair73, 0
%74 = icmp eq i32 %73, %69
%75 = zext i1 %74 to i32
store i32 %75, i32* @ui, align 4
@@ -621,7 +634,8 @@ entry:
%78 = load i8* @sc, align 1
%79 = sext i8 %78 to i32
%80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %81 = cmpxchg i32* %80, i32 %77, i32 %79 monotonic monotonic
+ %pair81 = cmpxchg i32* %80, i32 %77, i32 %79 monotonic monotonic
+ %81 = extractvalue { i32, i1 } %pair81, 0
%82 = icmp eq i32 %81, %77
%83 = zext i1 %82 to i32
store i32 %83, i32* @ui, align 4
@@ -630,7 +644,8 @@ entry:
%86 = load i8* @sc, align 1
%87 = sext i8 %86 to i32
%88 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
- %89 = cmpxchg i32* %88, i32 %85, i32 %87 monotonic monotonic
+ %pair89 = cmpxchg i32* %88, i32 %85, i32 %87 monotonic monotonic
+ %89 = extractvalue { i32, i1 } %pair89, 0
%90 = icmp eq i32 %89, %85
%91 = zext i1 %90 to i32
store i32 %91, i32* @ui, align 4
@@ -639,7 +654,8 @@ entry:
%94 = load i8* @sc, align 1
%95 = sext i8 %94 to i32
%96 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
- %97 = cmpxchg i32* %96, i32 %93, i32 %95 monotonic monotonic
+ %pair97 = cmpxchg i32* %96, i32 %93, i32 %95 monotonic monotonic
+ %97 = extractvalue { i32, i1 } %pair97, 0
%98 = icmp eq i32 %97, %93
%99 = zext i1 %98 to i32
store i32 %99, i32* @ui, align 4
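All of the cmpxchg churn in this test, and again in atomic-1.ll and atomic-2.ll below, is one IR change: cmpxchg now returns a { value, success } pair rather than a bare value, so each old result is rebuilt with extractvalue. A minimal sketch of the new shape (not taken from any one test):

define i32 @cas_sketch(i32* %p, i32 %old, i32 %new) {
entry:
  %pair = cmpxchg i32* %p, i32 %old, i32 %new monotonic monotonic
  %val = extractvalue { i32, i1 } %pair, 0  ; the value that was in memory
  %ok = extractvalue { i32, i1 } %pair, 1   ; i1 success flag, new in this form
  %r = select i1 %ok, i32 %val, i32 -1
  ret i32 %r
}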
diff --git a/test/CodeGen/PowerPC/Frames-alloca.ll b/test/CodeGen/PowerPC/Frames-alloca.ll
index 4588bc0..c701fef 100644
--- a/test/CodeGen/PowerPC/Frames-alloca.ll
+++ b/test/CodeGen/PowerPC/Frames-alloca.ll
@@ -12,15 +12,15 @@
; CHECK-PPC32-NOFP: stw r31, -4(r1)
; CHECK-PPC32-NOFP: lwz r1, 0(r1)
; CHECK-PPC32-NOFP: lwz r31, -4(r1)
-; CHECK-PPC32-RS: stwu r1, -80(r1)
-; CHECK-PPC32-RS-NOFP: stwu r1, -80(r1)
+; CHECK-PPC32-RS: stwu r1, -48(r1)
+; CHECK-PPC32-RS-NOFP: stwu r1, -48(r1)
; CHECK-PPC64: std r31, -8(r1)
-; CHECK-PPC64: stdu r1, -128(r1)
+; CHECK-PPC64: stdu r1, -64(r1)
; CHECK-PPC64: ld r1, 0(r1)
; CHECK-PPC64: ld r31, -8(r1)
; CHECK-PPC64-NOFP: std r31, -8(r1)
-; CHECK-PPC64-NOFP: stdu r1, -128(r1)
+; CHECK-PPC64-NOFP: stdu r1, -64(r1)
; CHECK-PPC64-NOFP: ld r1, 0(r1)
; CHECK-PPC64-NOFP: ld r31, -8(r1)
diff --git a/test/CodeGen/PowerPC/Frames-large.ll b/test/CodeGen/PowerPC/Frames-large.ll
index d07fea7..0ccea42 100644
--- a/test/CodeGen/PowerPC/Frames-large.ll
+++ b/test/CodeGen/PowerPC/Frames-large.ll
@@ -15,9 +15,9 @@ define i32* @f1() nounwind {
; PPC32-NOFP: _f1:
; PPC32-NOFP: lis r0, -1
-; PPC32-NOFP: ori r0, r0, 32704
+; PPC32-NOFP: ori r0, r0, 32736
; PPC32-NOFP: stwux r1, r1, r0
-; PPC32-NOFP: addi r3, r1, 68
+; PPC32-NOFP: addi r3, r1, 36
; PPC32-NOFP: lwz r1, 0(r1)
; PPC32-NOFP: blr
@@ -25,10 +25,10 @@ define i32* @f1() nounwind {
; PPC32-FP: _f1:
; PPC32-FP: lis r0, -1
; PPC32-FP: stw r31, -4(r1)
-; PPC32-FP: ori r0, r0, 32704
+; PPC32-FP: ori r0, r0, 32736
; PPC32-FP: stwux r1, r1, r0
; PPC32-FP: mr r31, r1
-; PPC32-FP: addi r3, r31, 64
+; PPC32-FP: addi r3, r31, 32
; PPC32-FP: lwz r1, 0(r1)
; PPC32-FP: lwz r31, -4(r1)
; PPC32-FP: blr
@@ -36,9 +36,9 @@ define i32* @f1() nounwind {
; PPC64-NOFP: _f1:
; PPC64-NOFP: lis r0, -1
-; PPC64-NOFP: ori r0, r0, 32656
+; PPC64-NOFP: ori r0, r0, 32720
; PPC64-NOFP: stdux r1, r1, r0
-; PPC64-NOFP: addi r3, r1, 116
+; PPC64-NOFP: addi r3, r1, 52
; PPC64-NOFP: ld r1, 0(r1)
; PPC64-NOFP: blr
@@ -46,10 +46,10 @@ define i32* @f1() nounwind {
; PPC64-FP: _f1:
; PPC64-FP: lis r0, -1
; PPC64-FP: std r31, -8(r1)
-; PPC64-FP: ori r0, r0, 32640
+; PPC64-FP: ori r0, r0, 32704
; PPC64-FP: stdux r1, r1, r0
; PPC64-FP: mr r31, r1
-; PPC64-FP: addi r3, r31, 124
+; PPC64-FP: addi r3, r31, 60
; PPC64-FP: ld r1, 0(r1)
; PPC64-FP: ld r31, -8(r1)
; PPC64-FP: blr
diff --git a/test/CodeGen/PowerPC/Frames-small.ll b/test/CodeGen/PowerPC/Frames-small.ll
index 0f6bd10..28c1a5b 100644
--- a/test/CodeGen/PowerPC/Frames-small.ll
+++ b/test/CodeGen/PowerPC/Frames-small.ll
@@ -1,25 +1,25 @@
; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -o %t1
; RUN: not grep "stw r31, -4(r1)" %t1
-; RUN: grep "stwu r1, -16448(r1)" %t1
-; RUN: grep "addi r1, r1, 16448" %t1
+; RUN: grep "stwu r1, -16416(r1)" %t1
+; RUN: grep "addi r1, r1, 16416" %t1
; RUN: llc < %s -march=ppc32 | \
; RUN: not grep "lwz r31, -4(r1)"
; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -disable-fp-elim \
; RUN: -o %t2
; RUN: grep "stw r31, -4(r1)" %t2
-; RUN: grep "stwu r1, -16448(r1)" %t2
-; RUN: grep "addi r1, r1, 16448" %t2
+; RUN: grep "stwu r1, -16416(r1)" %t2
+; RUN: grep "addi r1, r1, 16416" %t2
; RUN: grep "lwz r31, -4(r1)" %t2
; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin8 -o %t3
; RUN: not grep "std r31, -8(r1)" %t3
-; RUN: grep "stdu r1, -16496(r1)" %t3
-; RUN: grep "addi r1, r1, 16496" %t3
+; RUN: grep "stdu r1, -16432(r1)" %t3
+; RUN: grep "addi r1, r1, 16432" %t3
; RUN: not grep "ld r31, -8(r1)" %t3
; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin8 -disable-fp-elim \
; RUN: -o %t4
; RUN: grep "std r31, -8(r1)" %t4
-; RUN: grep "stdu r1, -16512(r1)" %t4
-; RUN: grep "addi r1, r1, 16512" %t4
+; RUN: grep "stdu r1, -16448(r1)" %t4
+; RUN: grep "addi r1, r1, 16448" %t4
; RUN: grep "ld r31, -8(r1)" %t4
define i32* @f1() {
diff --git a/test/CodeGen/PowerPC/atomic-1.ll b/test/CodeGen/PowerPC/atomic-1.ll
index 083df47..997a016 100644
--- a/test/CodeGen/PowerPC/atomic-1.ll
+++ b/test/CodeGen/PowerPC/atomic-1.ll
@@ -11,7 +11,8 @@ define i32 @exchange_and_add(i32* %mem, i32 %val) nounwind {
define i32 @exchange_and_cmp(i32* %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: lwarx
- %tmp = cmpxchg i32* %mem, i32 0, i32 1 monotonic monotonic
+ %tmppair = cmpxchg i32* %mem, i32 0, i32 1 monotonic monotonic
+ %tmp = extractvalue { i32, i1 } %tmppair, 0
; CHECK: stwcx.
; CHECK: stwcx.
ret i32 %tmp
diff --git a/test/CodeGen/PowerPC/atomic-2.ll b/test/CodeGen/PowerPC/atomic-2.ll
index 261335e..843250f 100644
--- a/test/CodeGen/PowerPC/atomic-2.ll
+++ b/test/CodeGen/PowerPC/atomic-2.ll
@@ -11,7 +11,8 @@ define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
define i64 @exchange_and_cmp(i64* %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: ldarx
- %tmp = cmpxchg i64* %mem, i64 0, i64 1 monotonic monotonic
+ %tmppair = cmpxchg i64* %mem, i64 0, i64 1 monotonic monotonic
+ %tmp = extractvalue { i64, i1 } %tmppair, 0
; CHECK: stdcx.
; CHECK: stdcx.
ret i64 %tmp
diff --git a/test/CodeGen/PowerPC/early-ret2.ll b/test/CodeGen/PowerPC/early-ret2.ll
index a8e456f..1784777 100644
--- a/test/CodeGen/PowerPC/early-ret2.ll
+++ b/test/CodeGen/PowerPC/early-ret2.ll
@@ -11,7 +11,7 @@ while.body.lr.ph: ; preds = %entry
br i1 undef, label %while.end, label %while.body
while.body: ; preds = %while.body, %while.body.lr.ph
- br i1 false, label %while.end, label %while.body, !llvm.vectorizer.already_vectorized !0
+ br i1 false, label %while.end, label %while.body, !llvm.loop.vectorize.already_vectorized !0
while.end: ; preds = %while.body, %while.body.lr.ph, %entry
ret void
diff --git a/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll b/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
index db0d8ed..ac41e8c 100644
--- a/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
+++ b/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
@@ -116,18 +116,6 @@ entry:
ret void
}
-define void @fptoui_float_i64(float %a) nounwind ssp {
-entry:
-; ELF64: fptoui_float_i64
- %b.addr = alloca i64, align 4
- %conv = fptoui float %a to i64
-; ELF64: fctiduz
-; ELF64: stfd
-; ELF64: ld
- store i64 %conv, i64* %b.addr, align 4
- ret void
-}
-
define void @fptoui_double_i32(double %a) nounwind ssp {
entry:
; ELF64: fptoui_double_i32
@@ -140,14 +128,3 @@ entry:
ret void
}
-define void @fptoui_double_i64(double %a) nounwind ssp {
-entry:
-; ELF64: fptoui_double_i64
- %b.addr = alloca i64, align 8
- %conv = fptoui double %a to i64
-; ELF64: fctiduz
-; ELF64: stfd
-; ELF64: ld
- store i64 %conv, i64* %b.addr, align 8
- ret void
-}
diff --git a/test/CodeGen/PowerPC/fast-isel-conversion.ll b/test/CodeGen/PowerPC/fast-isel-conversion.ll
index a31c312..5e00675 100644
--- a/test/CodeGen/PowerPC/fast-isel-conversion.ll
+++ b/test/CodeGen/PowerPC/fast-isel-conversion.ll
@@ -1,15 +1,24 @@
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=970 | FileCheck %s --check-prefix=PPC970
+
+;; Tests for 970 don't use -fast-isel-abort because we intentionally punt
+;; to SelectionDAG in some cases.
; Test sitofp
define void @sitofp_single_i64(i64 %a, float %b) nounwind ssp {
entry:
; ELF64: sitofp_single_i64
+; PPC970: sitofp_single_i64
%b.addr = alloca float, align 4
%conv = sitofp i64 %a to float
; ELF64: std
; ELF64: lfd
; ELF64: fcfids
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -17,11 +26,16 @@ entry:
define void @sitofp_single_i32(i32 %a, float %b) nounwind ssp {
entry:
; ELF64: sitofp_single_i32
+; PPC970: sitofp_single_i32
%b.addr = alloca float, align 4
%conv = sitofp i32 %a to float
; ELF64: std
; ELF64: lfiwax
; ELF64: fcfids
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -29,12 +43,18 @@ entry:
define void @sitofp_single_i16(i16 %a, float %b) nounwind ssp {
entry:
; ELF64: sitofp_single_i16
+; PPC970: sitofp_single_i16
%b.addr = alloca float, align 4
%conv = sitofp i16 %a to float
; ELF64: extsh
; ELF64: std
; ELF64: lfd
; ELF64: fcfids
+; PPC970: extsh
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -42,12 +62,18 @@ entry:
define void @sitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ELF64: sitofp_single_i8
+; PPC970: sitofp_single_i8
%b.addr = alloca float, align 4
%conv = sitofp i8 %a to float
; ELF64: extsb
; ELF64: std
; ELF64: lfd
; ELF64: fcfids
+; PPC970: extsb
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -55,11 +81,15 @@ entry:
define void @sitofp_double_i32(i32 %a, double %b) nounwind ssp {
entry:
; ELF64: sitofp_double_i32
+; PPC970: sitofp_double_i32
%b.addr = alloca double, align 8
%conv = sitofp i32 %a to double
; ELF64: std
; ELF64: lfiwax
; ELF64: fcfid
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -67,11 +97,15 @@ entry:
define void @sitofp_double_i64(i64 %a, double %b) nounwind ssp {
entry:
; ELF64: sitofp_double_i64
+; PPC970: sitofp_double_i64
%b.addr = alloca double, align 8
%conv = sitofp i64 %a to double
; ELF64: std
; ELF64: lfd
; ELF64: fcfid
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -79,12 +113,17 @@ entry:
define void @sitofp_double_i16(i16 %a, double %b) nounwind ssp {
entry:
; ELF64: sitofp_double_i16
+; PPC970: sitofp_double_i16
%b.addr = alloca double, align 8
%conv = sitofp i16 %a to double
; ELF64: extsh
; ELF64: std
; ELF64: lfd
; ELF64: fcfid
+; PPC970: extsh
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -92,12 +131,17 @@ entry:
define void @sitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ELF64: sitofp_double_i8
+; PPC970: sitofp_double_i8
%b.addr = alloca double, align 8
%conv = sitofp i8 %a to double
; ELF64: extsb
; ELF64: std
; ELF64: lfd
; ELF64: fcfid
+; PPC970: extsb
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -107,11 +151,13 @@ entry:
define void @uitofp_single_i64(i64 %a, float %b) nounwind ssp {
entry:
; ELF64: uitofp_single_i64
+; PPC970: uitofp_single_i64
%b.addr = alloca float, align 4
%conv = uitofp i64 %a to float
; ELF64: std
; ELF64: lfd
; ELF64: fcfidus
+; PPC970-NOT: fcfidus
store float %conv, float* %b.addr, align 4
ret void
}
@@ -119,11 +165,14 @@ entry:
define void @uitofp_single_i32(i32 %a, float %b) nounwind ssp {
entry:
; ELF64: uitofp_single_i32
+; PPC970: uitofp_single_i32
%b.addr = alloca float, align 4
%conv = uitofp i32 %a to float
; ELF64: std
; ELF64: lfiwzx
; ELF64: fcfidus
+; PPC970-NOT: lfiwzx
+; PPC970-NOT: fcfidus
store float %conv, float* %b.addr, align 4
ret void
}
@@ -131,12 +180,18 @@ entry:
define void @uitofp_single_i16(i16 %a, float %b) nounwind ssp {
entry:
; ELF64: uitofp_single_i16
+; PPC970: uitofp_single_i16
%b.addr = alloca float, align 4
%conv = uitofp i16 %a to float
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
; ELF64: std
; ELF64: lfd
; ELF64: fcfidus
+; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 16, 31
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -144,12 +199,18 @@ entry:
define void @uitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ELF64: uitofp_single_i8
+; PPC970: uitofp_single_i8
%b.addr = alloca float, align 4
%conv = uitofp i8 %a to float
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
; ELF64: std
; ELF64: lfd
; ELF64: fcfidus
+; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 24, 31
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -157,11 +218,13 @@ entry:
define void @uitofp_double_i64(i64 %a, double %b) nounwind ssp {
entry:
; ELF64: uitofp_double_i64
+; PPC970: uitofp_double_i64
%b.addr = alloca double, align 8
%conv = uitofp i64 %a to double
; ELF64: std
; ELF64: lfd
; ELF64: fcfidu
+; PPC970-NOT: fcfidu
store double %conv, double* %b.addr, align 8
ret void
}
@@ -169,11 +232,14 @@ entry:
define void @uitofp_double_i32(i32 %a, double %b) nounwind ssp {
entry:
; ELF64: uitofp_double_i32
+; PPC970: uitofp_double_i32
%b.addr = alloca double, align 8
%conv = uitofp i32 %a to double
; ELF64: std
; ELF64: lfiwzx
; ELF64: fcfidu
+; PPC970-NOT: lfiwzx
+; PPC970-NOT: fcfidu
store double %conv, double* %b.addr, align 8
ret void
}
@@ -181,12 +247,17 @@ entry:
define void @uitofp_double_i16(i16 %a, double %b) nounwind ssp {
entry:
; ELF64: uitofp_double_i16
+; PPC970: uitofp_double_i16
%b.addr = alloca double, align 8
%conv = uitofp i16 %a to double
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
; ELF64: std
; ELF64: lfd
; ELF64: fcfidu
+; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 16, 31
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -194,12 +265,17 @@ entry:
define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ELF64: uitofp_double_i8
+; PPC970: uitofp_double_i8
%b.addr = alloca double, align 8
%conv = uitofp i8 %a to double
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
; ELF64: std
; ELF64: lfd
; ELF64: fcfidu
+; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 24, 31
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -209,11 +285,15 @@ entry:
define void @fptosi_float_i32(float %a) nounwind ssp {
entry:
; ELF64: fptosi_float_i32
+; PPC970: fptosi_float_i32
%b.addr = alloca i32, align 4
%conv = fptosi float %a to i32
; ELF64: fctiwz
; ELF64: stfd
; ELF64: lwa
+; PPC970: fctiwz
+; PPC970: stfd
+; PPC970: lwa
store i32 %conv, i32* %b.addr, align 4
ret void
}
@@ -221,11 +301,15 @@ entry:
define void @fptosi_float_i64(float %a) nounwind ssp {
entry:
; ELF64: fptosi_float_i64
+; PPC970: fptosi_float_i64
%b.addr = alloca i64, align 4
%conv = fptosi float %a to i64
; ELF64: fctidz
; ELF64: stfd
; ELF64: ld
+; PPC970: fctidz
+; PPC970: stfd
+; PPC970: ld
store i64 %conv, i64* %b.addr, align 4
ret void
}
@@ -233,11 +317,15 @@ entry:
define void @fptosi_double_i32(double %a) nounwind ssp {
entry:
; ELF64: fptosi_double_i32
+; PPC970: fptosi_double_i32
%b.addr = alloca i32, align 8
%conv = fptosi double %a to i32
; ELF64: fctiwz
; ELF64: stfd
; ELF64: lwa
+; PPC970: fctiwz
+; PPC970: stfd
+; PPC970: lwa
store i32 %conv, i32* %b.addr, align 8
ret void
}
@@ -245,11 +333,15 @@ entry:
define void @fptosi_double_i64(double %a) nounwind ssp {
entry:
; ELF64: fptosi_double_i64
+; PPC970: fptosi_double_i64
%b.addr = alloca i64, align 8
%conv = fptosi double %a to i64
; ELF64: fctidz
; ELF64: stfd
; ELF64: ld
+; PPC970: fctidz
+; PPC970: stfd
+; PPC970: ld
store i64 %conv, i64* %b.addr, align 8
ret void
}
@@ -259,11 +351,15 @@ entry:
define void @fptoui_float_i32(float %a) nounwind ssp {
entry:
; ELF64: fptoui_float_i32
+; PPC970: fptoui_float_i32
%b.addr = alloca i32, align 4
%conv = fptoui float %a to i32
; ELF64: fctiwuz
; ELF64: stfd
; ELF64: lwz
+; PPC970: fctidz
+; PPC970: stfd
+; PPC970: lwz
store i32 %conv, i32* %b.addr, align 4
ret void
}
@@ -271,11 +367,13 @@ entry:
define void @fptoui_float_i64(float %a) nounwind ssp {
entry:
; ELF64: fptoui_float_i64
+; PPC970: fptoui_float_i64
%b.addr = alloca i64, align 4
%conv = fptoui float %a to i64
; ELF64: fctiduz
; ELF64: stfd
; ELF64: ld
+; PPC970-NOT: fctiduz
store i64 %conv, i64* %b.addr, align 4
ret void
}
@@ -283,11 +381,15 @@ entry:
define void @fptoui_double_i32(double %a) nounwind ssp {
entry:
; ELF64: fptoui_double_i32
+; PPC970: fptoui_double_i32
%b.addr = alloca i32, align 8
%conv = fptoui double %a to i32
; ELF64: fctiwuz
; ELF64: stfd
; ELF64: lwz
+; PPC970: fctidz
+; PPC970: stfd
+; PPC970: lwz
store i32 %conv, i32* %b.addr, align 8
ret void
}
@@ -295,11 +397,13 @@ entry:
define void @fptoui_double_i64(double %a) nounwind ssp {
entry:
; ELF64: fptoui_double_i64
+; PPC970: fptoui_double_i64
%b.addr = alloca i64, align 8
%conv = fptoui double %a to i64
; ELF64: fctiduz
; ELF64: stfd
; ELF64: ld
+; PPC970-NOT: fctiduz
store i64 %conv, i64* %b.addr, align 8
ret void
}
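The fptoui_*_i64 bodies deleted from fast-isel-conversion-p5.ll above are not lost: the same conversions stay covered here, now with PPC970-NOT checks. A 970 has no fctiduz, so fast-isel punts these conversions to SelectionDAG, which is also why the PPC970 RUN line omits -fast-isel-abort. The punted pattern, as a standalone sketch:

define i64 @punted_sketch(double %a) {
entry:
  %conv = fptoui double %a to i64  ; no fctiduz on 970; SelectionDAG expands this
  ret i64 %conv
}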
diff --git a/test/CodeGen/PowerPC/func-addr.ll b/test/CodeGen/PowerPC/func-addr.ll
new file mode 100644
index 0000000..4533c62
--- /dev/null
+++ b/test/CodeGen/PowerPC/func-addr.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple powerpc64-linux < %s | FileCheck %s
+; RUN: llc -O0 -mtriple powerpc64-linux < %s | FileCheck %s
+
+define void @foo() {
+ ret void
+}
+declare i32 @bar(i8*)
+
+; CHECK-LABEL: {{^}}zed:
+; CHECK: addis 3, 2, foo@toc@ha
+; CHECK-NEXT: addi 3, 3, foo@toc@l
+; CHECK-NEXT: bl bar
+
+define void @zed() {
+ call i32 @bar(i8* bitcast (void ()* @foo to i8*))
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/hello-reloc.s b/test/CodeGen/PowerPC/hello-reloc.s
index 1e3fb8f..97dfbb5 100644
--- a/test/CodeGen/PowerPC/hello-reloc.s
+++ b/test/CodeGen/PowerPC/hello-reloc.s
@@ -62,17 +62,17 @@ L_.str: ; @.str
; DARWIN-G4-DUMP:AddressSize: 32bit
; DARWIN-G4-DUMP:Relocations [
; DARWIN-G4-DUMP: Section __text {
-; DARWIN-G4-DUMP: 0x34 1 2 0 PPC_RELOC_BR24 0 -
-; DARWIN-G4-DUMP: 0x30 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 _main
-; DARWIN-G4-DUMP: 0x0 0 2 n/a PPC_RELOC_PAIR 1 _main
-; DARWIN-G4-DUMP: 0x2C 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 _main
-; DARWIN-G4-DUMP: 0x60 0 2 n/a PPC_RELOC_PAIR 1 _main
+; DARWIN-G4-DUMP: 0x34 1 2 0 PPC_RELOC_BR24 0 0x3
+; DARWIN-G4-DUMP: 0x30 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 0x74
+; DARWIN-G4-DUMP: 0x0 0 2 n/a PPC_RELOC_PAIR 1 0x14
+; DARWIN-G4-DUMP: 0x2C 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 0x74
+; DARWIN-G4-DUMP: 0x60 0 2 n/a PPC_RELOC_PAIR 1 0x14
; DARWIN-G4-DUMP: }
; DARWIN-G4-DUMP: Section __picsymbolstub1 {
-; DARWIN-G4-DUMP: 0x14 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 _main
-; DARWIN-G4-DUMP: 0x0 0 2 n/a PPC_RELOC_PAIR 1 _main
-; DARWIN-G4-DUMP: 0xC 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 _main
-; DARWIN-G4-DUMP: 0x18 0 2 n/a PPC_RELOC_PAIR 1 _main
+; DARWIN-G4-DUMP: 0x14 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 0x70
+; DARWIN-G4-DUMP: 0x0 0 2 n/a PPC_RELOC_PAIR 1 0x58
+; DARWIN-G4-DUMP: 0xC 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 0x70
+; DARWIN-G4-DUMP: 0x18 0 2 n/a PPC_RELOC_PAIR 1 0x58
; DARWIN-G4-DUMP: }
; DARWIN-G4-DUMP: Section __la_symbol_ptr {
; DARWIN-G4-DUMP: 0x0 0 2 1 PPC_RELOC_VANILLA 0 dyld_stub_binding_helper
diff --git a/test/CodeGen/PowerPC/lit.local.cfg b/test/CodeGen/PowerPC/lit.local.cfg
index 2e46300..5d33887 100644
--- a/test/CodeGen/PowerPC/lit.local.cfg
+++ b/test/CodeGen/PowerPC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'PowerPC' in targets:
+if not 'PowerPC' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/PowerPC/ppc64-altivec-abi.ll b/test/CodeGen/PowerPC/ppc64-altivec-abi.ll
new file mode 100644
index 0000000..0bed329
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-altivec-abi.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=ppc64 -mattr=+altivec | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Verify that in the 64-bit Linux ABI, vector arguments take up space
+; in the parameter save area.
+
+define i64 @callee(i64 %a, <4 x i32> %b, i64 %c, <4 x i32> %d, i64 %e) {
+entry:
+ ret i64 %e
+}
+; CHECK-LABEL: callee:
+; CHECK: ld 3, 112(1)
+
+define void @caller(i64 %x, <4 x i32> %y) {
+entry:
+ tail call void @test(i64 %x, <4 x i32> %y, i64 %x, <4 x i32> %y, i64 %x)
+ ret void
+}
+; CHECK-LABEL: caller:
+; CHECK: std 3, 112(1)
+
+declare void @test(i64, <4 x i32>, i64, <4 x i32>, i64)
+
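The 112 in these CHECK lines falls out of the layout, assuming the usual ELFv1 rule that the parameter save area starts 48 bytes above r1:

  offset  48: i64 %a
  offset  64: <4 x i32> %b   (56 rounded up to 16-byte alignment)
  offset  80: i64 %c
  offset  96: <4 x i32> %d
  offset 112: i64 %e         ; matches "ld 3, 112(1)" / "std 3, 112(1)"

so the test passes exactly when the vectors consume aligned save-area slots instead of being skipped.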
diff --git a/test/CodeGen/PowerPC/ppc64-byval-align.ll b/test/CodeGen/PowerPC/ppc64-byval-align.ll
new file mode 100644
index 0000000..0e73cf2
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-byval-align.ll
@@ -0,0 +1,56 @@
+; RUN: llc -O1 < %s -march=ppc64 -mcpu=pwr7 | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.test = type { i64, [8 x i8] }
+%struct.pad = type { [8 x i64] }
+
+@gt = common global %struct.test zeroinitializer, align 16
+@gp = common global %struct.pad zeroinitializer, align 8
+
+define signext i32 @callee1(i32 signext %x, %struct.test* byval align 16 nocapture readnone %y, i32 signext %z) {
+entry:
+ ret i32 %z
+}
+; CHECK-LABEL: @callee1
+; CHECK: mr 3, 7
+; CHECK: blr
+
+declare signext i32 @test1(i32 signext, %struct.test* byval align 16, i32 signext)
+define void @caller1(i32 signext %z) {
+entry:
+ %call = tail call signext i32 @test1(i32 signext 0, %struct.test* byval align 16 @gt, i32 signext %z)
+ ret void
+}
+; CHECK-LABEL: @caller1
+; CHECK: mr [[REG:[0-9]+]], 3
+; CHECK: mr 7, [[REG]]
+; CHECK: bl test1
+
+define i64 @callee2(%struct.pad* byval nocapture readnone %x, i32 signext %y, %struct.test* byval align 16 nocapture readonly %z) {
+entry:
+ %x1 = getelementptr inbounds %struct.test* %z, i64 0, i32 0
+ %0 = load i64* %x1, align 16
+ ret i64 %0
+}
+; CHECK-LABEL: @callee2
+; CHECK: ld [[REG:[0-9]+]], 128(1)
+; CHECK: mr 3, [[REG]]
+; CHECK: blr
+
+declare i64 @test2(%struct.pad* byval, i32 signext, %struct.test* byval align 16)
+define void @caller2(i64 %z) {
+entry:
+ %tmp = alloca %struct.test, align 16
+ %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test* %tmp, i64 0, i32 0
+ store i64 %z, i64* %.compoundliteral.sroa.0.0..sroa_idx, align 16
+ %call = call i64 @test2(%struct.pad* byval @gp, i32 signext 0, %struct.test* byval align 16 %tmp)
+ ret void
+}
+; CHECK-LABEL: @caller2
+; CHECK: std 3, [[OFF:[0-9]+]](1)
+; CHECK: ld [[REG:[0-9]+]], [[OFF]](1)
+; CHECK: std [[REG]], 128(1)
+; CHECK: bl test2
+
diff --git a/test/CodeGen/PowerPC/ppc64-calls.ll b/test/CodeGen/PowerPC/ppc64-calls.ll
index 1f3bb71..31794be 100644
--- a/test/CodeGen/PowerPC/ppc64-calls.ll
+++ b/test/CodeGen/PowerPC/ppc64-calls.ll
@@ -42,12 +42,18 @@ define void @test_indirect(void ()* nocapture %fp) nounwind {
ret void
}
-; Absolute vales should be have the TOC restore 'nop'
+; Absolute values must use the regular indirect call sequence
+; The main purpose of this test is to ensure that BLA is not
+; used on 64-bit SVR4 (as e.g. on Darwin).
define void @test_abs() nounwind {
; CHECK-LABEL: test_abs:
tail call void inttoptr (i64 1024 to void ()*)() nounwind
-; CHECK: bla 1024
-; CHECK-NEXT: nop
+; CHECK: ld [[FP:[0-9]+]], 1024(0)
+; CHECK: ld 11, 1040(0)
+; CHECK: ld 2, 1032(0)
+; CHECK-NEXT: mtctr [[FP]]
+; CHECK-NEXT: bctrl
+; CHECK-NEXT: ld 2, 40(1)
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc64-smallarg.ll b/test/CodeGen/PowerPC/ppc64-smallarg.ll
new file mode 100644
index 0000000..0d5b078
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-smallarg.ll
@@ -0,0 +1,59 @@
+; Verify that small structures and float arguments are passed in the
+; least significant part of a stack slot doubleword.
+
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.large_arg = type { [8 x i64] }
+%struct.small_arg = type { i16, i8 }
+
+@gl = common global %struct.large_arg zeroinitializer, align 8
+@gs = common global %struct.small_arg zeroinitializer, align 2
+@gf = common global float 0.000000e+00, align 4
+
+define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval nocapture readnone %pad, %struct.small_arg* byval nocapture readonly %x) {
+entry:
+ %0 = bitcast %struct.small_arg* %x to i32*
+ %1 = bitcast %struct.small_arg* %agg.result to i32*
+ %2 = load i32* %0, align 2
+ store i32 %2, i32* %1, align 2
+ ret void
+}
+; CHECK: @callee1
+; CHECK: lwz {{[0-9]+}}, 124(1)
+; CHECK: blr
+
+define void @caller1() {
+entry:
+ %tmp = alloca %struct.small_arg, align 2
+ call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval @gl, %struct.small_arg* byval @gs)
+ ret void
+}
+; CHECK: @caller1
+; CHECK: stw {{[0-9]+}}, 124(1)
+; CHECK: bl test1
+
+declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval, %struct.small_arg* byval)
+
+define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
+entry:
+ ret float %x
+}
+; CHECK: @callee2
+; CHECK: lfs {{[0-9]+}}, 156(1)
+; CHECK: blr
+
+define void @caller2() {
+entry:
+ %0 = load float* @gf, align 4
+ %call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
+ ret void
+}
+; CHECK: @caller2
+; CHECK: stfs {{[0-9]+}}, 156(1)
+; CHECK: bl test2
+
+declare float @test2(float, float, float, float, float, float, float, float, float, float, float, float, float, float)
+
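The 156 in callee2/caller2 can be derived the same way, assuming ELFv1 again: the thirteen float pads travel in f1-f13 but each still reserves a doubleword, so the slot for %x starts at 48 + 13*8 = 152, and on big-endian a float sits in the least significant half of its doubleword, giving 152 + 4 = 156. callee1's 124 is the same story for a 4-byte struct in the doubleword at 120. The little-endian twin test that follows checks the unshifted offsets 152 and 120.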
diff --git a/test/CodeGen/PowerPC/ppc64le-smallarg.ll b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
new file mode 100644
index 0000000..fcb1e92
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
@@ -0,0 +1,59 @@
+; Verify that small structures and float arguments are passed in the
+; least significant part of a stack slot doubleword.
+
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+%struct.large_arg = type { [8 x i64] }
+%struct.small_arg = type { i16, i8 }
+
+@gl = common global %struct.large_arg zeroinitializer, align 8
+@gs = common global %struct.small_arg zeroinitializer, align 2
+@gf = common global float 0.000000e+00, align 4
+
+define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval nocapture readnone %pad, %struct.small_arg* byval nocapture readonly %x) {
+entry:
+ %0 = bitcast %struct.small_arg* %x to i32*
+ %1 = bitcast %struct.small_arg* %agg.result to i32*
+ %2 = load i32* %0, align 2
+ store i32 %2, i32* %1, align 2
+ ret void
+}
+; CHECK: @callee1
+; CHECK: lwz {{[0-9]+}}, 120(1)
+; CHECK: blr
+
+define void @caller1() {
+entry:
+ %tmp = alloca %struct.small_arg, align 2
+ call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval @gl, %struct.small_arg* byval @gs)
+ ret void
+}
+; CHECK: @caller1
+; CHECK: stw {{[0-9]+}}, 120(1)
+; CHECK: bl test1
+
+declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval, %struct.small_arg* byval)
+
+define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
+entry:
+ ret float %x
+}
+; CHECK: @callee2
+; CHECK: lfs {{[0-9]+}}, 152(1)
+; CHECK: blr
+
+define void @caller2() {
+entry:
+ %0 = load float* @gf, align 4
+ %call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
+ ret void
+}
+; CHECK: @caller2
+; CHECK: stfs {{[0-9]+}}, 152(1)
+; CHECK: bl test2
+
+declare float @test2(float, float, float, float, float, float, float, float, float, float, float, float, float, float)
+
diff --git a/test/CodeGen/PowerPC/ppcf128-endian.ll b/test/CodeGen/PowerPC/ppcf128-endian.ll
new file mode 100644
index 0000000..2a5f13a
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppcf128-endian.ll
@@ -0,0 +1,154 @@
+; RUN: llc -mcpu=pwr7 -mattr=+altivec < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+@g = common global ppc_fp128 0xM00000000000000000000000000000000, align 16
+
+define void @callee(ppc_fp128 %x) {
+entry:
+ %x.addr = alloca ppc_fp128, align 16
+ store ppc_fp128 %x, ppc_fp128* %x.addr, align 16
+ %0 = load ppc_fp128* %x.addr, align 16
+ store ppc_fp128 %0, ppc_fp128* @g, align 16
+ ret void
+}
+; CHECK: @callee
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK: stfd 2, 8([[REG]])
+; CHECK: stfd 1, 0([[REG]])
+; CHECK: blr
+
+define void @caller() {
+entry:
+ %0 = load ppc_fp128* @g, align 16
+ call void @test(ppc_fp128 %0)
+ ret void
+}
+; CHECK: @caller
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK: lfd 2, 8([[REG]])
+; CHECK: lfd 1, 0([[REG]])
+; CHECK: bl test
+
+declare void @test(ppc_fp128)
+
+define void @caller_const() {
+entry:
+ call void @test(ppc_fp128 0xM3FF00000000000000000000000000000)
+ ret void
+}
+; CHECK: .LCPI[[LC:[0-9]+]]_0:
+; CHECK: .long 1065353216
+; CHECK: .LCPI[[LC]]_1:
+; CHECK: .long 0
+; CHECK: @caller_const
+; CHECK: addi [[REG0:[0-9]+]], {{[0-9]+}}, .LCPI[[LC]]_0
+; CHECK: addi [[REG1:[0-9]+]], {{[0-9]+}}, .LCPI[[LC]]_1
+; CHECK: lfs 1, 0([[REG0]])
+; CHECK: lfs 2, 0([[REG1]])
+; CHECK: bl test
+
+define ppc_fp128 @result() {
+entry:
+ %0 = load ppc_fp128* @g, align 16
+ ret ppc_fp128 %0
+}
+; CHECK: @result
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK: lfd 1, 0([[REG]])
+; CHECK: lfd 2, 8([[REG]])
+; CHECK: blr
+
+define void @use_result() {
+entry:
+ %call = tail call ppc_fp128 @test_result() #3
+ store ppc_fp128 %call, ppc_fp128* @g, align 16
+ ret void
+}
+; CHECK: @use_result
+; CHECK: bl test_result
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK: stfd 2, 8([[REG]])
+; CHECK: stfd 1, 0([[REG]])
+; CHECK: blr
+
+declare ppc_fp128 @test_result()
+
+define void @caller_result() {
+entry:
+ %call = tail call ppc_fp128 @test_result()
+ tail call void @test(ppc_fp128 %call)
+ ret void
+}
+; CHECK: @caller_result
+; CHECK: bl test_result
+; CHECK-NEXT: nop
+; CHECK-NEXT: bl test
+; CHECK-NEXT: nop
+
+define i128 @convert_from(ppc_fp128 %x) {
+entry:
+ %0 = bitcast ppc_fp128 %x to i128
+ ret i128 %0
+}
+; CHECK: @convert_from
+; CHECK: stfd 1, [[OFF1:.*]](1)
+; CHECK: stfd 2, [[OFF2:.*]](1)
+; CHECK: ld 3, [[OFF1]](1)
+; CHECK: ld 4, [[OFF2]](1)
+; CHECK: blr
+
+define ppc_fp128 @convert_to(i128 %x) {
+entry:
+ %0 = bitcast i128 %x to ppc_fp128
+ ret ppc_fp128 %0
+}
+; CHECK: @convert_to
+; CHECK: std 3, [[OFF1:.*]](1)
+; CHECK: std 4, [[OFF2:.*]](1)
+; CHECK: lfd 1, [[OFF1]](1)
+; CHECK: lfd 2, [[OFF2]](1)
+; CHECK: blr
+
+define ppc_fp128 @convert_to2(i128 %x) {
+entry:
+ %shl = shl i128 %x, 1
+ %0 = bitcast i128 %shl to ppc_fp128
+ ret ppc_fp128 %0
+}
+
+; CHECK: @convert_to2
+; CHECK: std 3, [[OFF1:.*]](1)
+; CHECK: std 4, [[OFF2:.*]](1)
+; CHECK: lfd 1, [[OFF1]](1)
+; CHECK: lfd 2, [[OFF2]](1)
+; CHECK: blr
+
+define double @convert_vector(<4 x i32> %x) {
+entry:
+ %cast = bitcast <4 x i32> %x to ppc_fp128
+ %conv = fptrunc ppc_fp128 %cast to double
+ ret double %conv
+}
+; CHECK: @convert_vector
+; CHECK: addi [[REG:[0-9]+]], 1, [[OFF:.*]]
+; CHECK: stvx 2, 0, [[REG]]
+; CHECK: lfd 1, [[OFF]](1)
+; CHECK: blr
+
+declare void @llvm.va_start(i8*)
+
+define double @vararg(i32 %a, ...) {
+entry:
+ %va = alloca i8*, align 8
+ %va1 = bitcast i8** %va to i8*
+ call void @llvm.va_start(i8* %va1)
+ %arg = va_arg i8** %va, ppc_fp128
+ %conv = fptrunc ppc_fp128 %arg to double
+ ret double %conv
+}
+; CHECK: @vararg
+; CHECK: lfd 1, 0({{[0-9]+}})
+; CHECK: blr
+
diff --git a/test/CodeGen/PowerPC/resolvefi-basereg.ll b/test/CodeGen/PowerPC/resolvefi-basereg.ll
new file mode 100644
index 0000000..62c2d13
--- /dev/null
+++ b/test/CodeGen/PowerPC/resolvefi-basereg.ll
@@ -0,0 +1,362 @@
+; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s
+
+; Due to a bug in resolveFrameIndex we ended up with invalid addresses
+; containing a base register 0. Verify that this no longer happens.
+; CHECK-NOT: (0)
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.Info = type { i32, i32, i8*, i8*, i8*, [32 x i8*], i64, [32 x i64], i64, i64, i64, [32 x i64] }
+%struct.S1998 = type { [2 x i32*], i64, i64, double, i16, i32, [29 x %struct.anon], i16, i8, i32, [8 x i8] }
+%struct.anon = type { [16 x double], i32, i16, i32, [3 x i8], [6 x i8], [4 x i32], i8 }
+
+@info = global %struct.Info zeroinitializer, align 8
+@fails = global i32 0, align 4
+@intarray = global [256 x i32] zeroinitializer, align 4
+@s1998 = global %struct.S1998 zeroinitializer, align 16
+@a1998 = external global [5 x %struct.S1998]
+
+define void @test1998() {
+entry:
+ %i = alloca i32, align 4
+ %j = alloca i32, align 4
+ %tmp = alloca i32, align 4
+ %agg.tmp = alloca %struct.S1998, align 16
+ %agg.tmp111 = alloca %struct.S1998, align 16
+ %agg.tmp112 = alloca %struct.S1998, align 16
+ %agg.tmp113 = alloca %struct.S1998, align 16
+ %agg.tmp114 = alloca %struct.S1998, align 16
+ %agg.tmp115 = alloca %struct.S1998, align 16
+ %agg.tmp116 = alloca %struct.S1998, align 16
+ %agg.tmp117 = alloca %struct.S1998, align 16
+ %agg.tmp118 = alloca %struct.S1998, align 16
+ %agg.tmp119 = alloca %struct.S1998, align 16
+ call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.S1998* @s1998 to i8*), i8 0, i64 5168, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8 0, i64 25840, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.Info* @info to i8*), i8 0, i64 832, i32 8, i1 false)
+ store i8* bitcast (%struct.S1998* @s1998 to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 2), align 8
+ store i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 3), align 8
+ store i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 3) to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 4), align 8
+ store i64 5168, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 6), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 9), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 10), align 8
+ %0 = load i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
+ %sub = sub i64 %0, 1
+ %and = and i64 ptrtoint (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 3) to i64), %sub
+ %tobool = icmp ne i64 %and, 0
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ %1 = load i32* @fails, align 4
+ %inc = add nsw i32 %1, 1
+ store i32 %inc, i32* @fails, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ store i32 0, i32* %i, align 4
+ store i32 0, i32* %j, align 4
+ %2 = load i32* %i, align 4
+ %idxprom = sext i32 %2 to i64
+ %arrayidx = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
+ store i8* bitcast (i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
+ %3 = load i32* %i, align 4
+ %idxprom1 = sext i32 %3 to i64
+ %arrayidx2 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
+ store i64 8, i64* %arrayidx2, align 8
+ %4 = load i32* %i, align 4
+ %idxprom3 = sext i32 %4 to i64
+ %arrayidx4 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
+ store i64 8, i64* %arrayidx4, align 8
+ store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
+ store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
+ %5 = load i32* %i, align 4
+ %inc5 = add nsw i32 %5, 1
+ store i32 %inc5, i32* %i, align 4
+ %6 = load i32* %i, align 4
+ %idxprom6 = sext i32 %6 to i64
+ %arrayidx7 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
+ store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
+ %7 = load i32* %i, align 4
+ %idxprom8 = sext i32 %7 to i64
+ %arrayidx9 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
+ store i64 8, i64* %arrayidx9, align 8
+ %8 = load i32* %i, align 4
+ %idxprom10 = sext i32 %8 to i64
+ %arrayidx11 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
+ store i64 8, i64* %arrayidx11, align 8
+ store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1), align 8
+ store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
+ %9 = load i32* %i, align 4
+ %inc12 = add nsw i32 %9, 1
+ store i32 %inc12, i32* %i, align 4
+ %10 = load i32* %i, align 4
+ %idxprom13 = sext i32 %10 to i64
+ %arrayidx14 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
+ store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
+ %11 = load i32* %i, align 4
+ %idxprom15 = sext i32 %11 to i64
+ %arrayidx16 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
+ store i64 8, i64* %arrayidx16, align 8
+ %12 = load i32* %i, align 4
+ %idxprom17 = sext i32 %12 to i64
+ %arrayidx18 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
+ store i64 8, i64* %arrayidx18, align 8
+ store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2), align 8
+ store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
+ %13 = load i32* %i, align 4
+ %inc19 = add nsw i32 %13, 1
+ store i32 %inc19, i32* %i, align 4
+ %14 = load i32* %i, align 4
+ %idxprom20 = sext i32 %14 to i64
+ %arrayidx21 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
+ store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
+ %15 = load i32* %i, align 4
+ %idxprom22 = sext i32 %15 to i64
+ %arrayidx23 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
+ store i64 8, i64* %arrayidx23, align 8
+ %16 = load i32* %i, align 4
+ %idxprom24 = sext i32 %16 to i64
+ %arrayidx25 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
+ store i64 16, i64* %arrayidx25, align 8
+ store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3), align 16
+ store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
+ %17 = load i32* %i, align 4
+ %inc26 = add nsw i32 %17, 1
+ store i32 %inc26, i32* %i, align 4
+ %18 = load i32* %i, align 4
+ %idxprom27 = sext i32 %18 to i64
+ %arrayidx28 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
+ store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
+ %19 = load i32* %i, align 4
+ %idxprom29 = sext i32 %19 to i64
+ %arrayidx30 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
+ store i64 2, i64* %arrayidx30, align 8
+ %20 = load i32* %i, align 4
+ %idxprom31 = sext i32 %20 to i64
+ %arrayidx32 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
+ store i64 2, i64* %arrayidx32, align 8
+ store i16 -15897, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4), align 2
+ store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
+ %21 = load i32* %i, align 4
+ %inc33 = add nsw i32 %21, 1
+ store i32 %inc33, i32* %i, align 4
+ store i32 -419541644, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 5), align 4
+ store i32 2125926812, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 5), align 4
+ %22 = load i32* %j, align 4
+ %inc34 = add nsw i32 %22, 1
+ store i32 %inc34, i32* %j, align 4
+ %23 = load i32* %i, align 4
+ %idxprom35 = sext i32 %23 to i64
+ %arrayidx36 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
+ store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
+ %24 = load i32* %i, align 4
+ %idxprom37 = sext i32 %24 to i64
+ %arrayidx38 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
+ store i64 8, i64* %arrayidx38, align 8
+ %25 = load i32* %i, align 4
+ %idxprom39 = sext i32 %25 to i64
+ %arrayidx40 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
+ store i64 8, i64* %arrayidx40, align 8
+ store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
+ store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
+ %26 = load i32* %i, align 4
+ %inc41 = add nsw i32 %26, 1
+ store i32 %inc41, i32* %i, align 4
+ %bf.load = load i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+ %bf.clear = and i32 %bf.load, 7
+ %bf.set = or i32 %bf.clear, 16
+ store i32 %bf.set, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+ %bf.load42 = load i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+ %bf.clear43 = and i32 %bf.load42, 7
+ %bf.set44 = or i32 %bf.clear43, 24
+ store i32 %bf.set44, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+ %27 = load i32* %j, align 4
+ %inc45 = add nsw i32 %27, 1
+ store i32 %inc45, i32* %j, align 4
+ %bf.load46 = load i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+ %bf.clear47 = and i16 %bf.load46, 127
+ store i16 %bf.clear47, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+ %bf.load48 = load i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+ %bf.clear49 = and i16 %bf.load48, 127
+ store i16 %bf.clear49, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+ %28 = load i32* %j, align 4
+ %inc50 = add nsw i32 %28, 1
+ store i32 %inc50, i32* %j, align 4
+ %bf.load51 = load i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+ %bf.clear52 = and i32 %bf.load51, 63
+ store i32 %bf.clear52, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+ %bf.load53 = load i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+ %bf.clear54 = and i32 %bf.load53, 63
+ %bf.set55 = or i32 %bf.clear54, 64
+ store i32 %bf.set55, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+ %29 = load i32* %j, align 4
+ %inc56 = add nsw i32 %29, 1
+ store i32 %inc56, i32* %j, align 4
+ %bf.load57 = load i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+ %bf.clear58 = and i24 %bf.load57, 63
+ store i24 %bf.clear58, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+ %bf.load59 = load i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+ %bf.clear60 = and i24 %bf.load59, 63
+ store i24 %bf.clear60, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+ %30 = load i32* %j, align 4
+ %inc61 = add nsw i32 %30, 1
+ store i32 %inc61, i32* %j, align 4
+ %31 = load i32* %i, align 4
+ %idxprom62 = sext i32 %31 to i64
+ %arrayidx63 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
+ store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
+ %32 = load i32* %i, align 4
+ %idxprom64 = sext i32 %32 to i64
+ %arrayidx65 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
+ store i64 1, i64* %arrayidx65, align 8
+ %33 = load i32* %i, align 4
+ %idxprom66 = sext i32 %33 to i64
+ %arrayidx67 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
+ store i64 1, i64* %arrayidx67, align 8
+ store i8 -83, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
+ store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
+ %34 = load i32* %i, align 4
+ %inc68 = add nsw i32 %34, 1
+ store i32 %inc68, i32* %i, align 4
+ %35 = load i32* %i, align 4
+ %idxprom69 = sext i32 %35 to i64
+ %arrayidx70 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
+ store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
+ %36 = load i32* %i, align 4
+ %idxprom71 = sext i32 %36 to i64
+ %arrayidx72 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
+ store i64 1, i64* %arrayidx72, align 8
+ %37 = load i32* %i, align 4
+ %idxprom73 = sext i32 %37 to i64
+ %arrayidx74 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
+ store i64 1, i64* %arrayidx74, align 8
+ store i8 34, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
+ store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
+ %38 = load i32* %i, align 4
+ %inc75 = add nsw i32 %38, 1
+ store i32 %inc75, i32* %i, align 4
+ %39 = load i32* %i, align 4
+ %idxprom76 = sext i32 %39 to i64
+ %arrayidx77 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
+ store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
+ %40 = load i32* %i, align 4
+ %idxprom78 = sext i32 %40 to i64
+ %arrayidx79 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
+ store i64 4, i64* %arrayidx79, align 8
+ %41 = load i32* %i, align 4
+ %idxprom80 = sext i32 %41 to i64
+ %arrayidx81 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
+ store i64 4, i64* %arrayidx81, align 8
+ store i32 -3, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
+ store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
+ %42 = load i32* %i, align 4
+ %inc82 = add nsw i32 %42, 1
+ store i32 %inc82, i32* %i, align 4
+ %43 = load i32* %i, align 4
+ %idxprom83 = sext i32 %43 to i64
+ %arrayidx84 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
+ store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
+ %44 = load i32* %i, align 4
+ %idxprom85 = sext i32 %44 to i64
+ %arrayidx86 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
+ store i64 1, i64* %arrayidx86, align 8
+ %45 = load i32* %i, align 4
+ %idxprom87 = sext i32 %45 to i64
+ %arrayidx88 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
+ store i64 1, i64* %arrayidx88, align 8
+ store i8 106, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
+ store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
+ %46 = load i32* %i, align 4
+ %inc89 = add nsw i32 %46, 1
+ store i32 %inc89, i32* %i, align 4
+ %47 = load i32* %i, align 4
+ %idxprom90 = sext i32 %47 to i64
+ %arrayidx91 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
+ store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
+ %48 = load i32* %i, align 4
+ %idxprom92 = sext i32 %48 to i64
+ %arrayidx93 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
+ store i64 2, i64* %arrayidx93, align 8
+ %49 = load i32* %i, align 4
+ %idxprom94 = sext i32 %49 to i64
+ %arrayidx95 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
+ store i64 2, i64* %arrayidx95, align 8
+ store i16 29665, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7), align 2
+ store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
+ %50 = load i32* %i, align 4
+ %inc96 = add nsw i32 %50, 1
+ store i32 %inc96, i32* %i, align 4
+ %51 = load i32* %i, align 4
+ %idxprom97 = sext i32 %51 to i64
+ %arrayidx98 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
+ store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
+ %52 = load i32* %i, align 4
+ %idxprom99 = sext i32 %52 to i64
+ %arrayidx100 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
+ store i64 1, i64* %arrayidx100, align 8
+ %53 = load i32* %i, align 4
+ %idxprom101 = sext i32 %53 to i64
+ %arrayidx102 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
+ store i64 1, i64* %arrayidx102, align 8
+ store i8 52, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), align 1
+ store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
+ %54 = load i32* %i, align 4
+ %inc103 = add nsw i32 %54, 1
+ store i32 %inc103, i32* %i, align 4
+ %55 = load i32* %i, align 4
+ %idxprom104 = sext i32 %55 to i64
+ %arrayidx105 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
+ store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
+ %56 = load i32* %i, align 4
+ %idxprom106 = sext i32 %56 to i64
+ %arrayidx107 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
+ store i64 4, i64* %arrayidx107, align 8
+ %57 = load i32* %i, align 4
+ %idxprom108 = sext i32 %57 to i64
+ %arrayidx109 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
+ store i64 4, i64* %arrayidx109, align 8
+ store i32 -54118453, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9), align 4
+ store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
+ %58 = load i32* %i, align 4
+ %inc110 = add nsw i32 %58, 1
+ store i32 %inc110, i32* %i, align 4
+ store i32 %inc110, i32* %tmp
+ %59 = load i32* %tmp
+ %60 = load i32* %i, align 4
+ store i32 %60, i32* getelementptr inbounds (%struct.Info* @info, i32 0, i32 0), align 4
+ %61 = load i32* %j, align 4
+ store i32 %61, i32* getelementptr inbounds (%struct.Info* @info, i32 0, i32 1), align 4
+ %62 = bitcast %struct.S1998* %agg.tmp111 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %62, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ %63 = bitcast %struct.S1998* %agg.tmp112 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %63, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval align 16 %agg.tmp112)
+ call void @checkx1998(%struct.S1998* byval align 16 %agg.tmp)
+ %64 = bitcast %struct.S1998* %agg.tmp113 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %64, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ %65 = bitcast %struct.S1998* %agg.tmp114 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %65, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ %66 = bitcast %struct.S1998* %agg.tmp115 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %66, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ call void (i32, ...)* @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval align 16 %agg.tmp113, i64 2, %struct.S1998* byval align 16 %agg.tmp114, %struct.S1998* byval align 16 %agg.tmp115)
+ %67 = bitcast %struct.S1998* %agg.tmp116 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %67, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ %68 = bitcast %struct.S1998* %agg.tmp117 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %68, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ %69 = bitcast %struct.S1998* %agg.tmp118 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %69, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ %70 = bitcast %struct.S1998* %agg.tmp119 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %70, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ call void (i32, ...)* @check1998va(i32 signext 2, %struct.S1998* byval align 16 %agg.tmp116, %struct.S1998* byval align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval align 16 %agg.tmp118, %struct.S1998* byval align 16 %agg.tmp119)
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+
+declare void @check1998(%struct.S1998* sret, %struct.S1998* byval align 16, %struct.S1998*, %struct.S1998* byval align 16)
+declare void @check1998va(i32 signext, ...)
+declare void @checkx1998(%struct.S1998* byval align 16 %arg)
+
diff --git a/test/CodeGen/PowerPC/svr4-redzone.ll b/test/CodeGen/PowerPC/svr4-redzone.ll
index 7c51b67..bee3ac3 100644
--- a/test/CodeGen/PowerPC/svr4-redzone.ll
+++ b/test/CodeGen/PowerPC/svr4-redzone.ll
@@ -36,4 +36,4 @@ entry:
; PPC32: stwu 1, -240(1)
; PPC64-LABEL: bigstack:
-; PPC64: stdu 1, -352(1)
+; PPC64: stdu 1, -288(1)
diff --git a/test/CodeGen/PowerPC/vec_cmp.ll b/test/CodeGen/PowerPC/vec_cmp.ll
index 4bce8c8..2733089 100644
--- a/test/CodeGen/PowerPC/vec_cmp.ll
+++ b/test/CodeGen/PowerPC/vec_cmp.ll
@@ -36,7 +36,7 @@ define <8 x i8> @v8si8_cmp(<8 x i8> %x, <8 x i8> %y) nounwind readnone {
; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
-; Adicional tests for v16i8 since it is a altivec native type
+; Additional tests for v16i8 since it is an altivec native type
define <16 x i8> @v16si8_cmp_eq(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
%cmp = icmp eq <16 x i8> %x, %y
@@ -165,7 +165,7 @@ define <4 x i16> @v4si16_cmp(<4 x i16> %x, <4 x i16> %y) nounwind readnone {
; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
-; Adicional tests for v8i16 since it is an altivec native type
+; Additional tests for v8i16 since it is an altivec native type
define <8 x i16> @v8si16_cmp_eq(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -298,7 +298,7 @@ define <2 x i32> @v2si32_cmp(<2 x i32> %x, <2 x i32> %y) nounwind readnone {
; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
-; Adicional tests for v4si32 since it is an altivec native type
+; Additional tests for v4si32 since it is an altivec native type
define <4 x i32> @v4si32_cmp_eq(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -449,7 +449,7 @@ entry:
; CHECK: vcmpeqfp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
-; Adicional tests for v4f32 since it is a altivec native type
+; Additional tests for v4f32 since it is an altivec native type
define <4 x float> @v4f32_cmp_eq(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
diff --git a/test/CodeGen/PowerPC/vec_misaligned.ll b/test/CodeGen/PowerPC/vec_misaligned.ll
index d7ed64a..304a84d 100644
--- a/test/CodeGen/PowerPC/vec_misaligned.ll
+++ b/test/CodeGen/PowerPC/vec_misaligned.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
+; RUN: llc < %s -march=ppc32 -mcpu=g5 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s -check-prefix=CHECK-LE
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc-apple-darwin8"
@@ -8,6 +10,8 @@ target triple = "powerpc-apple-darwin8"
define void @foo(i32 %x, ...) {
entry:
+; CHECK: foo:
+; CHECK-LE: foo:
%x_addr = alloca i32 ; <i32*> [#uses=1]
%ap = alloca i8* ; <i8**> [#uses=3]
%ap.0 = alloca i8* ; <i8**> [#uses=3]
@@ -27,6 +31,10 @@ entry:
%tmp8 = getelementptr %struct.u16qi* %tmp6, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
%tmp9 = getelementptr %struct.u16qi* %tmp7, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
%tmp10 = load <16 x i8>* %tmp9, align 4 ; <<16 x i8>> [#uses=1]
+; CHECK: lvsl
+; CHECK: vperm
+; CHECK-LE: lvsr
+; CHECK-LE: vperm
store <16 x i8> %tmp10, <16 x i8>* %tmp8, align 4
br label %return
diff --git a/test/CodeGen/PowerPC/vec_mul.ll b/test/CodeGen/PowerPC/vec_mul.ll
index c376751..8a44815 100644
--- a/test/CodeGen/PowerPC/vec_mul.ll
+++ b/test/CodeGen/PowerPC/vec_mul.ll
@@ -1,4 +1,6 @@
; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -march=ppc32 -mattr=+altivec | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec | FileCheck %s -check-prefix=CHECK-LE
define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
%tmp = load <4 x i32>* %X ; <<4 x i32>> [#uses=1]
@@ -9,6 +11,9 @@ define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
; CHECK-LABEL: test_v4i32:
; CHECK: vmsumuhm
; CHECK-NOT: mullw
+; CHECK-LE-LABEL: test_v4i32:
+; CHECK-LE: vmsumuhm
+; CHECK-LE-NOT: mullw
define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
%tmp = load <8 x i16>* %X ; <<8 x i16>> [#uses=1]
@@ -19,6 +24,9 @@ define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
; CHECK-LABEL: test_v8i16:
; CHECK: vmladduhm
; CHECK-NOT: mullw
+; CHECK-LE-LABEL: test_v8i16:
+; CHECK-LE: vmladduhm
+; CHECK-LE-NOT: mullw
define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
%tmp = load <16 x i8>* %X ; <<16 x i8>> [#uses=1]
@@ -30,6 +38,11 @@ define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
; CHECK: vmuloub
; CHECK: vmuleub
; CHECK-NOT: mullw
+; CHECK-LE-LABEL: test_v16i8:
+; CHECK-LE: vmuloub [[REG1:[0-9]+]]
+; CHECK-LE: vmuleub [[REG2:[0-9]+]]
+; CHECK-LE: vperm {{[0-9]+}}, [[REG2]], [[REG1]]
+; CHECK-LE-NOT: mullw
define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
%tmp = load <4 x float>* %X
@@ -44,3 +57,7 @@ define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
; CHECK: vspltisw [[ZNEG:[0-9]+]], -1
; CHECK: vslw {{[0-9]+}}, [[ZNEG]], [[ZNEG]]
; CHECK: vmaddfp
+; CHECK-LE-LABEL: test_float:
+; CHECK-LE: vspltisw [[ZNEG:[0-9]+]], -1
+; CHECK-LE: vslw {{[0-9]+}}, [[ZNEG]], [[ZNEG]]
+; CHECK-LE: vmaddfp
diff --git a/test/CodeGen/PowerPC/vec_shuffle_le.ll b/test/CodeGen/PowerPC/vec_shuffle_le.ll
new file mode 100644
index 0000000..635721c
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_shuffle_le.ll
@@ -0,0 +1,191 @@
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s
+
+define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VPKUHUM_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK: vpkuhum
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VPKUHUM_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VPKUHUM_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+; CHECK: vpkuhum
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VPKUWUM_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
+; CHECK: vpkuwum
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VPKUWUM_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VPKUWUM_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
+; CHECK: vpkuwum
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGLB_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+; CHECK: vmrglb
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLB_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGLB_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+; CHECK: vmrglb
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGHB_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK: vmrghb
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHB_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGHB_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
+; CHECK: vmrghb
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGLH_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
+; CHECK: vmrglh
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLH_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGLH_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
+; CHECK: vmrglh
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGHH_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
+; CHECK: vmrghh
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHH_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGHH_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
+; CHECK: vmrghh
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGLW_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
+; CHECK: vmrglw
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLW_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGLW_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+; CHECK: vmrglw
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGHW_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
+; CHECK: vmrghw
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHW_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGHW_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
+; CHECK: vmrghw
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VSLDOI_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
+; CHECK: vsldoi
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VSLDOI_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VSLDOI_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
+; CHECK: vsldoi
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
diff --git a/test/CodeGen/PowerPC/vperm-instcombine.ll b/test/CodeGen/PowerPC/vperm-instcombine.ll
new file mode 100644
index 0000000..d9084c8
--- /dev/null
+++ b/test/CodeGen/PowerPC/vperm-instcombine.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+define <16 x i8> @foo() nounwind ssp {
+; CHECK: @foo
+;; Arguments are {0,1,...,15},{16,17,...,31},{30,28,26,...,0}
+ %1 = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> <i32 50462976, i32 117835012, i32 185207048, i32 252579084>, <4 x i32> <i32 319951120, i32 387323156, i32 454695192, i32 522067228>, <16 x i8> <i8 30, i8 28, i8 26, i8 24, i8 22, i8 20, i8 18, i8 16, i8 14, i8 12, i8 10, i8 8, i8 6, i8 4, i8 2, i8 0>)
+ %2 = bitcast <4 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+;; Revised arguments are {16,17,...,31},{0,1,...,15},{1,3,5,...,31}
+;; optimized into the following:
+; CHECK: ret <16 x i8> <i8 17, i8 19, i8 21, i8 23, i8 25, i8 27, i8 29, i8 31, i8 1, i8 3, i8 5, i8 7, i8 9, i8 11, i8 13, i8 15>
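+;; (Derivation, for reference: on little-endian the combine swaps the two
+;; source vectors and rewrites each mask byte m as 31-m, so {30,28,...,0}
+;; becomes {1,3,...,31}; with constant inputs the call then folds to the
+;; vector checked above.)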
+}
+
+declare <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32>, <4 x i32>, <16 x i8>)
diff --git a/test/CodeGen/PowerPC/vperm-lowering.ll b/test/CodeGen/PowerPC/vperm-lowering.ll
new file mode 100644
index 0000000..d55d26c
--- /dev/null
+++ b/test/CodeGen/PowerPC/vperm-lowering.ll
@@ -0,0 +1,66 @@
+; RUN: llc -O0 -fast-isel=false -mcpu=ppc64 < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+define <16 x i8> @foo() nounwind ssp {
+ %1 = shufflevector <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 1, i32 6, i32 11>
+ ret <16 x i8> %1
+}
+
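+; The permute control vector below is the little-endian transform of the
+; shuffle mask above: the operands are swapped and each index i becomes
+; 31-i (e.g. 0 -> 31, 5 -> 26, 30 -> 1).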
+; CHECK: .LCPI0_0:
+; CHECK: .byte 31
+; CHECK: .byte 26
+; CHECK: .byte 21
+; CHECK: .byte 16
+; CHECK: .byte 11
+; CHECK: .byte 6
+; CHECK: .byte 1
+; CHECK: .byte 28
+; CHECK: .byte 23
+; CHECK: .byte 18
+; CHECK: .byte 13
+; CHECK: .byte 8
+; CHECK: .byte 3
+; CHECK: .byte 30
+; CHECK: .byte 25
+; CHECK: .byte 20
+; CHECK: .LCPI0_1:
+; CHECK: .byte 0
+; CHECK: .byte 1
+; CHECK: .byte 2
+; CHECK: .byte 3
+; CHECK: .byte 4
+; CHECK: .byte 5
+; CHECK: .byte 6
+; CHECK: .byte 7
+; CHECK: .byte 8
+; CHECK: .byte 9
+; CHECK: .byte 10
+; CHECK: .byte 11
+; CHECK: .byte 12
+; CHECK: .byte 13
+; CHECK: .byte 14
+; CHECK: .byte 15
+; CHECK: .LCPI0_2:
+; CHECK: .byte 16
+; CHECK: .byte 17
+; CHECK: .byte 18
+; CHECK: .byte 19
+; CHECK: .byte 20
+; CHECK: .byte 21
+; CHECK: .byte 22
+; CHECK: .byte 23
+; CHECK: .byte 24
+; CHECK: .byte 25
+; CHECK: .byte 26
+; CHECK: .byte 27
+; CHECK: .byte 28
+; CHECK: .byte 29
+; CHECK: .byte 30
+; CHECK: .byte 31
+; CHECK: foo:
+; CHECK: addis [[REG1:[0-9]+]], 2, .LCPI0_2@toc@ha
+; CHECK: addi [[REG2:[0-9]+]], [[REG1]], .LCPI0_2@toc@l
+; CHECK: lvx [[REG3:[0-9]+]], 0, [[REG2]]
+; CHECK: vperm {{[0-9]+}}, [[REG3]], {{[0-9]+}}, {{[0-9]+}}
diff --git a/test/CodeGen/R600/add_i64.ll b/test/CodeGen/R600/add_i64.ll
index c9eaeda..f733d90 100644
--- a/test/CodeGen/R600/add_i64.ll
+++ b/test/CodeGen/R600/add_i64.ll
@@ -70,9 +70,9 @@ define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> add
}
; SI-LABEL: @trunc_i64_add_to_i32
-; SI: S_LOAD_DWORD [[SREG0:s[0-9]+]],
-; SI: S_LOAD_DWORD [[SREG1:s[0-9]+]],
-; SI: S_ADD_I32 [[SRESULT:s[0-9]+]], [[SREG1]], [[SREG0]]
+; SI: S_LOAD_DWORDX2 s{{\[}}[[SREG0:[0-9]+]]
+; SI: S_LOAD_DWORDX2 s{{\[}}[[SREG1:[0-9]+]]
+; SI: S_ADD_I32 [[SRESULT:s[0-9]+]], s[[SREG1]], s[[SREG0]]
; SI-NOT: ADDC
; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; SI: BUFFER_STORE_DWORD [[VRESULT]],
diff --git a/test/CodeGen/R600/and.ll b/test/CodeGen/R600/and.ll
index ee9bc83..cf11481 100644
--- a/test/CodeGen/R600/and.ll
+++ b/test/CodeGen/R600/and.ll
@@ -1,13 +1,12 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-;EG-CHECK: @test2
-;EG-CHECK: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: @test2
+; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test2
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -18,17 +17,16 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK: @test4
-;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: @test4
+; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test4
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -38,3 +36,75 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @s_and_i32
+; SI: S_AND_B32
+define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %and = and i32 %a, %b
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @s_and_constant_i32
+; SI: S_AND_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
+define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
+ %and = and i32 %a, 1234567
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_and_i32
+; SI: V_AND_B32
+define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %and = and i32 %a, %b
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_and_constant_i32
+; SI: V_AND_B32
+define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %and = and i32 %a, 1234567
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @s_and_i64
+; SI: S_AND_B64
+define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+ %and = and i64 %a, %b
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_and_constant_i64
+; SI: S_AND_B64
+define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
+ %and = and i64 %a, 281474976710655
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_and_i64
+; SI: V_AND_B32
+; SI: V_AND_B32
+define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
+ %a = load i64 addrspace(1)* %aptr, align 8
+ %b = load i64 addrspace(1)* %bptr, align 8
+ %and = and i64 %a, %b
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_and_constant_i64
+; SI: V_AND_B32
+; SI: V_AND_B32
+define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+ %a = load i64 addrspace(1)* %aptr, align 8
+ %and = and i64 %a, 1234567
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/array-ptr-calc-i32.ll b/test/CodeGen/R600/array-ptr-calc-i32.ll
index c2362da..3230353 100644
--- a/test/CodeGen/R600/array-ptr-calc-i32.ll
+++ b/test/CodeGen/R600/array-ptr-calc-i32.ll
@@ -10,7 +10,12 @@ declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
; SI-LABEL: @test_private_array_ptr_calc:
; SI: V_ADD_I32_e32 [[PTRREG:v[0-9]+]]
-; SI: V_MOVRELD_B32_e32 {{v[0-9]+}}, [[PTRREG]]
+;
+; FIXME: The AMDGPUPromoteAlloca pass should be able to convert this
+; alloca to a vector. It currently fails because it does not know how
+; to interpret:
+; getelementptr [4 x i32]* %alloca, i32 1, i32 %b
+; SI: DS_WRITE_B32 {{v[0-9]+}}, [[PTRREG]]
define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
%alloca = alloca [4 x i32], i32 4, align 16
%tid = call i32 @llvm.SI.tid() readnone
diff --git a/test/CodeGen/R600/atomic_cmp_swap_local.ll b/test/CodeGen/R600/atomic_cmp_swap_local.ll
new file mode 100644
index 0000000..eb9539e
--- /dev/null
+++ b/test/CodeGen/R600/atomic_cmp_swap_local.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
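+; Note: cmpxchg now returns a { value, success } pair, so these tests
+; extract element 0 to get the loaded value.
+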
+; FUNC-LABEL: @lds_atomic_cmpxchg_ret_i32_offset:
+; SI: S_LOAD_DWORD [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: V_MOV_B32_e32 [[VCMP:v[0-9]+]], 7
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI-DAG: V_MOV_B32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
+; SI: DS_CMPST_RTN_B32 [[RESULT:v[0-9]+]], [[VPTR]], [[VCMP]], [[VSWAP]], 0x10, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_cmpxchg_ret_i64_offset:
+; SI: S_LOAD_DWORDX2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: S_MOV_B64 s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7
+; SI-DAG: V_MOV_B32_e32 v[[LOVCMP:[0-9]+]], s[[LOSCMP]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVCMP:[0-9]+]], s[[HISCMP]]
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI-DAG: V_MOV_B32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]]
+; SI-DAG: V_MOV_B32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
+; SI: DS_CMPST_RTN_B64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}}, 0x20, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr, i64 %swap) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/atomic_load_add.ll b/test/CodeGen/R600/atomic_load_add.ll
index cb0242c..c26f9cd 100644
--- a/test/CodeGen/R600/atomic_load_add.ll
+++ b/test/CodeGen/R600/atomic_load_add.ll
@@ -1,23 +1,38 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; R600-CHECK-LABEL: @atomic_add_local
-; R600-CHECK: LDS_ADD *
-; SI-CHECK-LABEL: @atomic_add_local
-; SI-CHECK: DS_ADD_U32_RTN
+; FUNC-LABEL: @atomic_add_local
+; R600: LDS_ADD *
+; SI: DS_ADD_RTN_U32
define void @atomic_add_local(i32 addrspace(3)* %local) {
-entry:
- %0 = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
+ %unused = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
ret void
}
-; R600-CHECK-LABEL: @atomic_add_ret_local
-; R600-CHECK: LDS_ADD_RET *
-; SI-CHECK-LABEL: @atomic_add_ret_local
-; SI-CHECK: DS_ADD_U32_RTN
+; FUNC-LABEL: @atomic_add_local_const_offset
+; R600: LDS_ADD *
+; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
+ %gep = getelementptr i32 addrspace(3)* %local, i32 4
+ %val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
+ ret void
+}
+
+; FUNC-LABEL: @atomic_add_ret_local
+; R600: LDS_ADD_RET *
+; SI: DS_ADD_RTN_U32
define void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
-entry:
- %0 = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
- store i32 %0, i32 addrspace(1)* %out
+ %val = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @atomic_add_ret_local_const_offset
+; R600: LDS_ADD_RET *
+; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x14
+define void @atomic_add_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
+ %gep = getelementptr i32 addrspace(3)* %local, i32 5
+ %val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
+ store i32 %val, i32 addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/R600/atomic_load_sub.ll b/test/CodeGen/R600/atomic_load_sub.ll
index 7c26e52..3569d91 100644
--- a/test/CodeGen/R600/atomic_load_sub.ll
+++ b/test/CodeGen/R600/atomic_load_sub.ll
@@ -1,23 +1,38 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; R600-CHECK-LABEL: @atomic_sub_local
-; R600-CHECK: LDS_SUB *
-; SI-CHECK-LABEL: @atomic_sub_local
-; SI-CHECK: DS_SUB_U32_RTN
+; FUNC-LABEL: @atomic_sub_local
+; R600: LDS_SUB *
+; SI: DS_SUB_RTN_U32
define void @atomic_sub_local(i32 addrspace(3)* %local) {
-entry:
- %0 = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
+ %unused = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
ret void
}
-; R600-CHECK-LABEL: @atomic_sub_ret_local
-; R600-CHECK: LDS_SUB_RET *
-; SI-CHECK-LABEL: @atomic_sub_ret_local
-; SI-CHECK: DS_SUB_U32_RTN
+; FUNC-LABEL: @atomic_sub_local_const_offset
+; R600: LDS_SUB *
+; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
+ %gep = getelementptr i32 addrspace(3)* %local, i32 4
+ %val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
+ ret void
+}
+
+; FUNC-LABEL: @atomic_sub_ret_local
+; R600: LDS_SUB_RET *
+; SI: DS_SUB_RTN_U32
define void @atomic_sub_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
-entry:
- %0 = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
- store i32 %0, i32 addrspace(1)* %out
+ %val = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @atomic_sub_ret_local_const_offset
+; R600: LDS_SUB_RET *
+; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x14
+define void @atomic_sub_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
+ %gep = getelementptr i32 addrspace(3)* %local, i32 5
+ %val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
+ store i32 %val, i32 addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/R600/big_alu.ll b/test/CodeGen/R600/big_alu.ll
index 6b68376..511e8ef 100644
--- a/test/CodeGen/R600/big_alu.ll
+++ b/test/CodeGen/R600/big_alu.ll
@@ -101,7 +101,7 @@ IF137: ; preds = %main_body
%88 = insertelement <4 x float> %87, float %32, i32 2
%89 = insertelement <4 x float> %88, float 0.000000e+00, i32 3
%90 = call float @llvm.AMDGPU.dp4(<4 x float> %85, <4 x float> %89)
- %91 = call float @llvm.AMDGPU.rsq(float %90)
+ %91 = call float @llvm.AMDGPU.rsq.f32(float %90)
%92 = fmul float %30, %91
%93 = fmul float %31, %91
%94 = fmul float %32, %91
@@ -344,7 +344,7 @@ ENDIF136: ; preds = %main_body, %ENDIF15
%325 = insertelement <4 x float> %324, float %318, i32 2
%326 = insertelement <4 x float> %325, float 0.000000e+00, i32 3
%327 = call float @llvm.AMDGPU.dp4(<4 x float> %322, <4 x float> %326)
- %328 = call float @llvm.AMDGPU.rsq(float %327)
+ %328 = call float @llvm.AMDGPU.rsq.f32(float %327)
%329 = fmul float %314, %328
%330 = fmul float %316, %328
%331 = fmul float %318, %328
@@ -377,7 +377,7 @@ ENDIF136: ; preds = %main_body, %ENDIF15
%358 = insertelement <4 x float> %357, float %45, i32 2
%359 = insertelement <4 x float> %358, float 0.000000e+00, i32 3
%360 = call float @llvm.AMDGPU.dp4(<4 x float> %355, <4 x float> %359)
- %361 = call float @llvm.AMDGPU.rsq(float %360)
+ %361 = call float @llvm.AMDGPU.rsq.f32(float %360)
%362 = fmul float %45, %361
%363 = call float @fabs(float %362)
%364 = fmul float %176, 0x3FECCCCCC0000000
@@ -403,7 +403,7 @@ ENDIF136: ; preds = %main_body, %ENDIF15
%384 = insertelement <4 x float> %383, float %45, i32 2
%385 = insertelement <4 x float> %384, float 0.000000e+00, i32 3
%386 = call float @llvm.AMDGPU.dp4(<4 x float> %381, <4 x float> %385)
- %387 = call float @llvm.AMDGPU.rsq(float %386)
+ %387 = call float @llvm.AMDGPU.rsq.f32(float %386)
%388 = fmul float %45, %387
%389 = call float @fabs(float %388)
%390 = fmul float %176, 0x3FF51EB860000000
@@ -1041,7 +1041,7 @@ IF179: ; preds = %ENDIF175
%896 = insertelement <4 x float> %895, float %45, i32 2
%897 = insertelement <4 x float> %896, float 0.000000e+00, i32 3
%898 = call float @llvm.AMDGPU.dp4(<4 x float> %893, <4 x float> %897)
- %899 = call float @llvm.AMDGPU.rsq(float %898)
+ %899 = call float @llvm.AMDGPU.rsq.f32(float %898)
%900 = fmul float %45, %899
%901 = call float @fabs(float %900)
%902 = fmul float %176, 0x3FECCCCCC0000000
@@ -1150,7 +1150,7 @@ ENDIF178: ; preds = %ENDIF175, %IF179
declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) #1
; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #1
+declare float @llvm.AMDGPU.rsq.f32(float) #1
; Function Attrs: readnone
declare <4 x float> @llvm.AMDGPU.tex(<4 x float>, i32, i32, i32) #1
diff --git a/test/CodeGen/R600/bitcast.ll b/test/CodeGen/R600/bitcast.ll
index 5bfc008..0be79e6 100644
--- a/test/CodeGen/R600/bitcast.ll
+++ b/test/CodeGen/R600/bitcast.ll
@@ -1,9 +1,11 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; This test just checks that the compiler doesn't crash.
-; CHECK-LABEL: @v32i8_to_v8i32
-; CHECK: S_ENDPGM
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+; FUNC-LABEL: @v32i8_to_v8i32
+; SI: S_ENDPGM
define void @v32i8_to_v8i32(<32 x i8> addrspace(2)* inreg) #0 {
entry:
%1 = load <32 x i8> addrspace(2)* %0
@@ -15,12 +17,8 @@ entry:
ret void
}
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #0 = { "ShaderType"="0" }
-
-; CHECK-LABEL: @i8ptr_v16i8ptr
-; CHECK: S_ENDPGM
+; FUNC-LABEL: @i8ptr_v16i8ptr
+; SI: S_ENDPGM
define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)*
@@ -28,3 +26,53 @@ entry:
store <16 x i8> %1, <16 x i8> addrspace(1)* %out
ret void
}
+
+define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %load = load float addrspace(1)* %in, align 4
+ %bc = bitcast float %load to <2 x i16>
+ store <2 x i16> %bc, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
+ %load = load <2 x i16> addrspace(1)* %in, align 4
+ %bc = bitcast <2 x i16> %load to float
+ store float %bc, float addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 4
+ %bc = bitcast <4 x i8> %load to i32
+ store i32 %bc, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %bc = bitcast i32 %load to <4 x i8>
+ store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bitcast_v2i32_to_f64
+; SI: S_ENDPGM
+define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %val = load <2 x i32> addrspace(1)* %in, align 8
+ %add = add <2 x i32> %val, <i32 4, i32 9>
+ %bc = bitcast <2 x i32> %add to double
+ store double %bc, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @bitcast_f64_to_v2i32
+; SI: S_ENDPGM
+define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
+ %val = load double addrspace(1)* %in, align 8
+ %add = fadd double %val, 4.0
+ %bc = bitcast double %add to <2 x i32>
+ store <2 x i32> %bc, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/bswap.ll b/test/CodeGen/R600/bswap.ll
new file mode 100644
index 0000000..6aebe85
--- /dev/null
+++ b/test/CodeGen/R600/bswap.ll
@@ -0,0 +1,50 @@
+; RUN: llc -march=r600 -mcpu=SI < %s
+
+declare i32 @llvm.bswap.i32(i32) nounwind readnone
+declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) nounwind readnone
+declare i64 @llvm.bswap.i64(i64) nounwind readnone
+declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) nounwind readnone
+declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>) nounwind readnone
+
+define void @test_bswap_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %bswap = call i32 @llvm.bswap.i32(i32 %val) nounwind readnone
+ store i32 %bswap, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @test_bswap_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) nounwind {
+ %val = load <2 x i32> addrspace(1)* %in, align 8
+ %bswap = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %val) nounwind readnone
+ store <2 x i32> %bswap, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+define void @test_bswap_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) nounwind {
+ %val = load <4 x i32> addrspace(1)* %in, align 16
+ %bswap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %val) nounwind readnone
+ store <4 x i32> %bswap, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+define void @test_bswap_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+ %val = load i64 addrspace(1)* %in, align 8
+ %bswap = call i64 @llvm.bswap.i64(i64 %val) nounwind readnone
+ store i64 %bswap, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+define void @test_bswap_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) nounwind {
+ %val = load <2 x i64> addrspace(1)* %in, align 16
+ %bswap = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %val) nounwind readnone
+ store <2 x i64> %bswap, <2 x i64> addrspace(1)* %out, align 16
+ ret void
+}
+
+define void @test_bswap_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) nounwind {
+ %val = load <4 x i64> addrspace(1)* %in, align 32
+ %bswap = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %val) nounwind readnone
+ store <4 x i64> %bswap, <4 x i64> addrspace(1)* %out, align 32
+ ret void
+}
diff --git a/test/CodeGen/R600/ctlz_zero_undef.ll b/test/CodeGen/R600/ctlz_zero_undef.ll
new file mode 100644
index 0000000..15b5188
--- /dev/null
+++ b/test/CodeGen/R600/ctlz_zero_undef.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
+declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
+
+; FUNC-LABEL: @s_ctlz_zero_undef_i32:
+; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
+; SI: S_FLBIT_I32_B32 [[SRESULT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+define void @s_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+ %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
+ store i32 %ctlz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctlz_zero_undef_i32:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_FFBH_U32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %val = load i32 addrspace(1)* %valptr, align 4
+ %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
+ store i32 %ctlz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctlz_zero_undef_v2i32:
+; SI: BUFFER_LOAD_DWORDX2
+; SI: V_FFBH_U32_e32
+; SI: V_FFBH_U32_e32
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
+ %val = load <2 x i32> addrspace(1)* %valptr, align 8
+ %ctlz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
+ store <2 x i32> %ctlz, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_ctlz_zero_undef_v4i32:
+; SI: BUFFER_LOAD_DWORDX4
+; SI: V_FFBH_U32_e32
+; SI: V_FFBH_U32_e32
+; SI: V_FFBH_U32_e32
+; SI: V_FFBH_U32_e32
+; SI: BUFFER_STORE_DWORDX4
+; SI: S_ENDPGM
+define void @v_ctlz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
+ %val = load <4 x i32> addrspace(1)* %valptr, align 16
+ %ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
+ store <4 x i32> %ctlz, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/ctpop.ll b/test/CodeGen/R600/ctpop.ll
new file mode 100644
index 0000000..15be8e1
--- /dev/null
+++ b/test/CodeGen/R600/ctpop.ll
@@ -0,0 +1,284 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readnone
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) nounwind readnone
+declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>) nounwind readnone
+
+; FUNC-LABEL: @s_ctpop_i32:
+; SI: S_LOAD_DWORD [[SVAL:s[0-9]+]],
+; SI: S_BCNT1_I32_B32 [[SRESULT:s[0-9]+]], [[SVAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ store i32 %ctpop, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; XXX - Why 0 in register?
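+; (Presumably because the VOP2 encoding takes only a VGPR as src1:
+; V_BCNT_U32_B32 computes bitcount(src0) + src1, so the zero addend must
+; first be materialized in a register.)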
+; FUNC-LABEL: @v_ctpop_i32:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
+; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VZERO]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ store i32 %ctpop, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_add_chain_i32
+; SI: BUFFER_LOAD_DWORD [[VAL0:v[0-9]+]],
+; SI: BUFFER_LOAD_DWORD [[VAL1:v[0-9]+]],
+; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
+; SI: V_BCNT_U32_B32_e32 [[MIDRESULT:v[0-9]+]], [[VAL1]], [[VZERO]]
+; SI-NOT: ADD
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1) nounwind {
+ %val0 = load i32 addrspace(1)* %in0, align 4
+ %val1 = load i32 addrspace(1)* %in1, align 4
+ %ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
+ %ctpop1 = call i32 @llvm.ctpop.i32(i32 %val1) nounwind readnone
+ %add = add i32 %ctpop0, %ctpop1
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v2i32:
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) nounwind {
+ %val = load <2 x i32> addrspace(1)* %in, align 8
+ %ctpop = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %val) nounwind readnone
+ store <2 x i32> %ctpop, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v4i32:
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %in) nounwind {
+ %val = load <4 x i32> addrspace(1)* %in, align 16
+ %ctpop = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %val) nounwind readnone
+ store <4 x i32> %ctpop, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v8i32:
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrspace(1)* noalias %in) nounwind {
+ %val = load <8 x i32> addrspace(1)* %in, align 32
+ %ctpop = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %val) nounwind readnone
+ store <8 x i32> %ctpop, <8 x i32> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v16i32:
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> addrspace(1)* noalias %in) nounwind {
+ %val = load <16 x i32> addrspace(1)* %in, align 32
+ %ctpop = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %val) nounwind readnone
+ store <16 x i32> %ctpop, <16 x i32> addrspace(1)* %out, align 32
+ ret void
+}
+
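+; 4 fits in the inline constant range (integers -16..64 on SI), so it should
+; be encodable directly as the second source of the VOP3 (e64) bcnt.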
+; FUNC-LABEL: @v_ctpop_i32_add_inline_constant:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %ctpop, 4
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_inline_constant_inv:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 4, %ctpop
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
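+; 99999 (0x1869f) is outside the inline constant range, so it presumably has
+; to be materialized into a VGPR with V_MOV_B32 before the VOP2 bcnt.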
+; FUNC-LABEL: @v_ctpop_i32_add_literal:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_MOV_B32_e32 [[LIT:v[0-9]+]], 0x1869f
+; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %ctpop, 99999
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_var:
+; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI-DAG: S_LOAD_DWORD [[VAR:s[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %ctpop, %const
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_var_inv:
+; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI-DAG: S_LOAD_DWORD [[VAR:s[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %const, %ctpop
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_vvar_inv
+; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]], {{.*}} + 0x0
+; SI-DAG: BUFFER_LOAD_DWORD [[VAR:v[0-9]+]], {{.*}} + 0x10
+; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 addrspace(1)* noalias %constptr) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %gep = getelementptr i32 addrspace(1)* %constptr, i32 4
+ %const = load i32 addrspace(1)* %gep, align 4
+ %add = add i32 %const, %ctpop
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FIXME: We currently disallow SALU instructions in all branches,
+; but there are some cases when they should be allowed.
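+; (e.g. when the branch condition is uniform, a scalar S_BCNT1_I32_B32 inside
+; the branch would presumably still be safe.)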
+
+; FUNC-LABEL: @ctpop_i32_in_br
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], 0
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+; EG: BCNT_INT
+define void @ctpop_i32_in_br(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %cond) {
+entry:
+ %0 = icmp eq i32 %cond, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = load i32 addrspace(1)* %in
+ %2 = call i32 @llvm.ctpop.i32(i32 %1)
+ br label %endif
+
+else:
+ %3 = getelementptr i32 addrspace(1)* %in, i32 1
+ %4 = load i32 addrspace(1)* %3
+ br label %endif
+
+endif:
+ %5 = phi i32 [%2, %if], [%4, %else]
+ store i32 %5, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/ctpop64.ll b/test/CodeGen/R600/ctpop64.ll
new file mode 100644
index 0000000..b36ecc6
--- /dev/null
+++ b/test/CodeGen/R600/ctpop64.ll
@@ -0,0 +1,122 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i64 @llvm.ctpop.i64(i64) nounwind readnone
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) nounwind readnone
+declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>) nounwind readnone
+declare <16 x i64> @llvm.ctpop.v16i64(<16 x i64>) nounwind readnone
+
+; FUNC-LABEL: @s_ctpop_i64:
+; SI: S_LOAD_DWORDX2 [[SVAL:s\[[0-9]+:[0-9]+\]]],
+; SI: S_BCNT1_I32_B64 [[SRESULT:s[0-9]+]], [[SVAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+define void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
+ %ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
+ %truncctpop = trunc i64 %ctpop to i32
+ store i32 %truncctpop, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
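+; A 64-bit ctpop should split into two 32-bit bcnts, with the second bcnt
+; accumulating onto the first through its add-style second operand.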
+; FUNC-LABEL: @v_ctpop_i64:
+; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
+; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
+; SI: V_BCNT_U32_B32_e32 [[MIDRESULT:v[0-9]+]], v[[LOVAL]], [[VZERO]]
+; SI-NEXT: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+ %val = load i64 addrspace(1)* %in, align 8
+ %ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
+ %truncctpop = trunc i64 %ctpop to i32
+ store i32 %truncctpop, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @s_ctpop_v2i64:
+; SI: S_BCNT1_I32_B64
+; SI: S_BCNT1_I32_B64
+; SI: S_ENDPGM
+define void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val) nounwind {
+ %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
+ %truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
+ store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_ctpop_v4i64:
+; SI: S_BCNT1_I32_B64
+; SI: S_BCNT1_I32_B64
+; SI: S_BCNT1_I32_B64
+; SI: S_BCNT1_I32_B64
+; SI: S_ENDPGM
+define void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val) nounwind {
+ %ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
+ %truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
+ store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v2i64:
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: S_ENDPGM
+define void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in) nounwind {
+ %val = load <2 x i64> addrspace(1)* %in, align 16
+ %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
+ %truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
+ store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v4i64:
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: S_ENDPGM
+define void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrspace(1)* noalias %in) nounwind {
+ %val = load <4 x i64> addrspace(1)* %in, align 32
+ %ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
+ %truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
+ store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FIXME: We currently disallow SALU instructions in all branches,
+; but there are some cases when they should be allowed.
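+; (for a uniform branch the i64 case could presumably keep using
+; S_BCNT1_I32_B64 instead of splitting into VALU bcnts.)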
+
+; FUNC-LABEL: @ctpop_i64_in_br
+; SI: V_BCNT_U32_B32_e64 [[BCNT_LO:v[0-9]+]], v{{[0-9]+}}, 0
+; SI: V_BCNT_U32_B32_e32 v[[BCNT:[0-9]+]], v{{[0-9]+}}, [[BCNT_LO]]
+; SI: V_MOV_B32_e32 v[[ZERO:[0-9]+]], 0
+; SI: BUFFER_STORE_DWORDX2 v[
+; SI: [[BCNT]]:[[ZERO]]]
+; SI: S_ENDPGM
+define void @ctpop_i64_in_br(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i32 %cond) {
+entry:
+ %0 = icmp eq i32 %cond, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = load i64 addrspace(1)* %in
+ %2 = call i64 @llvm.ctpop.i64(i64 %1)
+ br label %endif
+
+else:
+ %3 = getelementptr i64 addrspace(1)* %in, i32 1
+ %4 = load i64 addrspace(1)* %3
+ br label %endif
+
+endif:
+ %5 = phi i64 [%2, %if], [%4, %else]
+ store i64 %5, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/cttz_zero_undef.ll b/test/CodeGen/R600/cttz_zero_undef.ll
new file mode 100644
index 0000000..cf44f8e
--- /dev/null
+++ b/test/CodeGen/R600/cttz_zero_undef.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
+declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1) nounwind readnone
+declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1) nounwind readnone
+
+; FUNC-LABEL: @s_cttz_zero_undef_i32:
+; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
+; SI: S_FF1_I32_B32 [[SRESULT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+define void @s_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+ %cttz = call i32 @llvm.cttz.i32(i32 %val, i1 true) nounwind readnone
+ store i32 %cttz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_cttz_zero_undef_i32:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_FFBL_B32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %val = load i32 addrspace(1)* %valptr, align 4
+ %cttz = call i32 @llvm.cttz.i32(i32 %val, i1 true) nounwind readnone
+ store i32 %cttz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_cttz_zero_undef_v2i32:
+; SI: BUFFER_LOAD_DWORDX2
+; SI: V_FFBL_B32_e32
+; SI: V_FFBL_B32_e32
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
+ %val = load <2 x i32> addrspace(1)* %valptr, align 8
+ %cttz = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
+ store <2 x i32> %cttz, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_cttz_zero_undef_v4i32:
+; SI: BUFFER_LOAD_DWORDX4
+; SI: V_FFBL_B32_e32
+; SI: V_FFBL_B32_e32
+; SI: V_FFBL_B32_e32
+; SI: V_FFBL_B32_e32
+; SI: BUFFER_STORE_DWORDX4
+; SI: S_ENDPGM
+define void @v_cttz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
+ %val = load <4 x i32> addrspace(1)* %valptr, align 16
+ %cttz = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
+ store <4 x i32> %cttz, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/cvt_f32_ubyte.ll b/test/CodeGen/R600/cvt_f32_ubyte.ll
new file mode 100644
index 0000000..fe97a44
--- /dev/null
+++ b/test/CodeGen/R600/cvt_f32_ubyte.ll
@@ -0,0 +1,171 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @load_i8_to_f32:
+; SI: BUFFER_LOAD_UBYTE [[LOADREG:v[0-9]+]],
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI: V_CVT_F32_UBYTE0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
+; SI: BUFFER_STORE_DWORD [[CONV]],
+define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
+ %load = load i8 addrspace(1)* %in, align 1
+ %cvt = uitofp i8 %load to float
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @load_v2i8_to_v2f32:
+; SI: BUFFER_LOAD_USHORT [[LOADREG:v[0-9]+]],
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI-NOT: AND
+; SI-DAG: V_CVT_F32_UBYTE1_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
+; SI-DAG: V_CVT_F32_UBYTE0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
+; SI: BUFFER_STORE_DWORDX2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <2 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <2 x i8> %load to <2 x float>
+ store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @load_v3i8_to_v3f32:
+; SI-NOT: BFE
+; SI-NOT: V_CVT_F32_UBYTE3_e32
+; SI-DAG: V_CVT_F32_UBYTE2_e32
+; SI-DAG: V_CVT_F32_UBYTE1_e32
+; SI-DAG: V_CVT_F32_UBYTE0_e32
+; SI: BUFFER_STORE_DWORDX2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <3 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <3 x i8> %load to <3 x float>
+ store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @load_v4i8_to_v4f32:
+; SI: BUFFER_LOAD_DWORD [[LOADREG:v[0-9]+]],
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI-DAG: V_CVT_F32_UBYTE3_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
+; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, [[LOADREG]]
+; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, [[LOADREG]]
+; SI-DAG: V_CVT_F32_UBYTE0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
+; SI: BUFFER_STORE_DWORDX4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <4 x i8> %load to <4 x float>
+ store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; XXX - This should really still be able to use the V_CVT_F32_UBYTE0
+; for each component, but computeKnownBits doesn't handle vectors very
+; well.
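+; (the per-lane UBYTE0..UBYTE3 selection presumably needs computeKnownBits to
+; track known-zero high bits through vector values, which it does not yet do.)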
+
+; SI-LABEL: @load_v4i8_to_v4f32_2_uses:
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_CVT_F32_UBYTE0_e32
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_CVT_F32_UBYTE0_e32
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_CVT_F32_UBYTE0_e32
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_CVT_F32_UBYTE0_e32
+
+; XXX - replace with this when v4i8 loads aren't scalarized anymore.
+; XSI: BUFFER_LOAD_DWORD
+; XSI: V_CVT_F32_U32_e32
+; XSI: V_CVT_F32_U32_e32
+; XSI: V_CVT_F32_U32_e32
+; XSI: V_CVT_F32_U32_e32
+; SI: S_ENDPGM
+define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 4
+ %cvt = uitofp <4 x i8> %load to <4 x float>
+ store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+ %add = add <4 x i8> %load, <i8 9, i8 9, i8 9, i8 9> ; Second use of %load
+ store <4 x i8> %add, <4 x i8> addrspace(1)* %out2, align 4
+ ret void
+}
+
+; Make sure this doesn't crash.
+; SI-LABEL: @load_v7i8_to_v7f32:
+; SI: S_ENDPGM
+define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <7 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <7 x i8> %load to <7 x float>
+ store <7 x float> %cvt, <7 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @load_v8i8_to_v8f32:
+; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI-DAG: V_CVT_F32_UBYTE3_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: V_CVT_F32_UBYTE0_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: V_CVT_F32_UBYTE3_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: V_CVT_F32_UBYTE0_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <8 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <8 x i8> %load to <8 x float>
+ store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @i8_zext_inreg_i32_to_f32:
+; SI: BUFFER_LOAD_DWORD [[LOADREG:v[0-9]+]],
+; SI: V_ADD_I32_e32 [[ADD:v[0-9]+]], 2, [[LOADREG]]
+; SI-NEXT: V_CVT_F32_UBYTE0_e32 [[CONV:v[0-9]+]], [[ADD]]
+; SI: BUFFER_STORE_DWORD [[CONV]],
+define void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 2
+ %inreg = and i32 %add, 255
+ %cvt = uitofp i32 %inreg to float
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @i8_zext_inreg_hi1_to_f32:
+define void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %inreg = and i32 %load, 65280
+ %shr = lshr i32 %inreg, 8
+ %cvt = uitofp i32 %shr to float
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+
+; We don't match these cases because of the zext, but instcombine removes
+; them, so it shouldn't really matter.
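+; (instcombine canonicalizes 'uitofp (zext i8 %x to i32)' to a direct
+; 'uitofp i8 %x to float', which is presumably the form the UBYTE0 pattern
+; matches.)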
+define void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
+ %load = load i8 addrspace(1)* %in, align 1
+ %ext = zext i8 %load to i32
+ %cvt = uitofp i32 %ext to float
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 1
+ %ext = zext <4 x i8> %load to <4 x i32>
+ %cvt = uitofp <4 x i32> %ext to <4 x float>
+ store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/default-fp-mode.ll b/test/CodeGen/R600/default-fp-mode.ll
new file mode 100644
index 0000000..214b2c2
--- /dev/null
+++ b/test/CodeGen/R600/default-fp-mode.ll
@@ -0,0 +1,10 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @test_kernel
+; SI: FloatMode: 240
+; SI: IeeeMode: 0
+define void @test_kernel(float addrspace(1)* %out0, double addrspace(1)* %out1) nounwind {
+ store float 0.0, float addrspace(1)* %out0
+ store double 0.0, double addrspace(1)* %out1
+ ret void
+}
diff --git a/test/CodeGen/R600/fceil.ll b/test/CodeGen/R600/fceil.ll
index b8b945f..458363a 100644
--- a/test/CodeGen/R600/fceil.ll
+++ b/test/CodeGen/R600/fceil.ll
@@ -1,84 +1,131 @@
-; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-declare double @llvm.ceil.f64(double) nounwind readnone
-declare <2 x double> @llvm.ceil.v2f64(<2 x double>) nounwind readnone
-declare <3 x double> @llvm.ceil.v3f64(<3 x double>) nounwind readnone
-declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
-declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
-declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
+declare float @llvm.ceil.f32(float) nounwind readnone
+declare <2 x float> @llvm.ceil.v2f32(<2 x float>) nounwind readnone
+declare <3 x float> @llvm.ceil.v3f32(<3 x float>) nounwind readnone
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
+declare <8 x float> @llvm.ceil.v8f32(<8 x float>) nounwind readnone
+declare <16 x float> @llvm.ceil.v16f32(<16 x float>) nounwind readnone
-; CI-LABEL: @fceil_f64:
-; CI: V_CEIL_F64_e32
-define void @fceil_f64(double addrspace(1)* %out, double %x) {
- %y = call double @llvm.ceil.f64(double %x) nounwind readnone
- store double %y, double addrspace(1)* %out
+; FUNC-LABEL: @fceil_f32:
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+define void @fceil_f32(float addrspace(1)* %out, float %x) {
+ %y = call float @llvm.ceil.f32(float %x) nounwind readnone
+ store float %y, float addrspace(1)* %out
ret void
}
-; CI-LABEL: @fceil_v2f64:
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
- %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x) nounwind readnone
- store <2 x double> %y, <2 x double> addrspace(1)* %out
+; FUNC-LABEL: @fceil_v2f32:
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
+; EG: CEIL {{\*? *}}[[RESULT]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+define void @fceil_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
+ %y = call <2 x float> @llvm.ceil.v2f32(<2 x float> %x) nounwind readnone
+ store <2 x float> %y, <2 x float> addrspace(1)* %out
ret void
}
-; FIXME-CI-LABEL: @fceil_v3f64:
-; FIXME-CI: V_CEIL_F64_e32
-; FIXME-CI: V_CEIL_F64_e32
-; FIXME-CI: V_CEIL_F64_e32
-; define void @fceil_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
-; %y = call <3 x double> @llvm.ceil.v3f64(<3 x double> %x) nounwind readnone
-; store <3 x double> %y, <3 x double> addrspace(1)* %out
-; ret void
-; }
+; FUNC-LABEL: @fceil_v3f32:
+; FIXME-SI: V_CEIL_F32_e32
+; FIXME-SI: V_CEIL_F32_e32
+; FIXME-SI: V_CEIL_F32_e32
+; FIXME-EG: v3 is treated as v2 and v1, hence 2 stores
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT1:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT2:T[0-9]+]]{{\.[XYZW]}}
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+define void @fceil_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
+ %y = call <3 x float> @llvm.ceil.v3f32(<3 x float> %x) nounwind readnone
+ store <3 x float> %y, <3 x float> addrspace(1)* %out
+ ret void
+}
-; CI-LABEL: @fceil_v4f64:
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-define void @fceil_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
- %y = call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
- store <4 x double> %y, <4 x double> addrspace(1)* %out
+; FUNC-LABEL: @fceil_v4f32:
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
+; EG: CEIL {{\*? *}}[[RESULT]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+define void @fceil_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
+ %y = call <4 x float> @llvm.ceil.v4f32(<4 x float> %x) nounwind readnone
+ store <4 x float> %y, <4 x float> addrspace(1)* %out
ret void
}
-; CI-LABEL: @fceil_v8f64:
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-define void @fceil_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
- %y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
- store <8 x double> %y, <8 x double> addrspace(1)* %out
+; FUNC-LABEL: @fceil_v8f32:
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT1:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT2:T[0-9]+]]{{\.[XYZW]}}
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+define void @fceil_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
+ %y = call <8 x float> @llvm.ceil.v8f32(<8 x float> %x) nounwind readnone
+ store <8 x float> %y, <8 x float> addrspace(1)* %out
ret void
}
-; CI-LABEL: @fceil_v16f64:
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-; CI: V_CEIL_F64_e32
-define void @fceil_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
- %y = call <16 x double> @llvm.ceil.v16f64(<16 x double> %x) nounwind readnone
- store <16 x double> %y, <16 x double> addrspace(1)* %out
+; FUNC-LABEL: @fceil_v16f32:
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT1:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT2:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT3:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT4:T[0-9]+]]{{\.[XYZW]}}
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT3]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT3]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT3]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT3]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
+define void @fceil_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %x) {
+ %y = call <16 x float> @llvm.ceil.v16f32(<16 x float> %x) nounwind readnone
+ store <16 x float> %y, <16 x float> addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/R600/fceil64.ll b/test/CodeGen/R600/fceil64.ll
new file mode 100644
index 0000000..b42aefa
--- /dev/null
+++ b/test/CodeGen/R600/fceil64.ll
@@ -0,0 +1,103 @@
+; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare double @llvm.ceil.f64(double) nounwind readnone
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>) nounwind readnone
+declare <3 x double> @llvm.ceil.v3f64(<3 x double>) nounwind readnone
+declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
+declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
+declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
+
+; FUNC-LABEL: @fceil_f64:
+; CI: V_CEIL_F64_e32
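+; SI has no V_CEIL_F64, so ceil is presumably expanded by hand: S_BFE_I32
+; extracts the exponent, the fraction bits below it are masked off to form
+; trunc(x), and 1.0 is conditionally added back when the input was positive
+; and not already an integer.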
+; SI: S_BFE_I32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
+; SI: S_ADD_I32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
+; SI: S_LSHR_B64
+; SI: S_NOT_B64
+; SI: S_AND_B64
+; SI: S_AND_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
+; SI: CMP_LT_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: CMP_GT_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: CMP_GT_F64
+; SI: CNDMASK_B32
+; SI: CMP_NE_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: V_ADD_F64
+define void @fceil_f64(double addrspace(1)* %out, double %x) {
+ %y = call double @llvm.ceil.f64(double %x) nounwind readnone
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v2f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+ %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x) nounwind readnone
+ store <2 x double> %y, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FIXME-FUNC-LABEL: @fceil_v3f64:
+; FIXME-CI: V_CEIL_F64_e32
+; FIXME-CI: V_CEIL_F64_e32
+; FIXME-CI: V_CEIL_F64_e32
+; define void @fceil_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; %y = call <3 x double> @llvm.ceil.v3f64(<3 x double> %x) nounwind readnone
+; store <3 x double> %y, <3 x double> addrspace(1)* %out
+; ret void
+; }
+
+; FUNC-LABEL: @fceil_v4f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+ %y = call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
+ store <4 x double> %y, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v8f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+ %y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
+ store <8 x double> %y, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v16f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+ %y = call <16 x double> @llvm.ceil.v16f64(<16 x double> %x) nounwind readnone
+ store <16 x double> %y, <16 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fcopysign.f32.ll b/test/CodeGen/R600/fcopysign.f32.ll
new file mode 100644
index 0000000..7b4425b
--- /dev/null
+++ b/test/CodeGen/R600/fcopysign.f32.ll
@@ -0,0 +1,50 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+
+declare float @llvm.copysign.f32(float, float) nounwind readnone
+declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) nounwind readnone
+
+; Try to identify arg based on higher address.
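+; (%mag and %sign are consecutive kernel arguments, so %sign should be the
+; S_LOAD_DWORD at the higher offset: 0xc versus 0xb for %mag.)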
+; FUNC-LABEL: @test_copysign_f32:
+; SI: S_LOAD_DWORD [[SSIGN:s[0-9]+]], {{.*}} 0xc
+; SI: V_MOV_B32_e32 [[VSIGN:v[0-9]+]], [[SSIGN]]
+; SI-DAG: S_LOAD_DWORD [[SMAG:s[0-9]+]], {{.*}} 0xb
+; SI-DAG: V_MOV_B32_e32 [[VMAG:v[0-9]+]], [[SMAG]]
+; SI-DAG: S_MOV_B32 [[SCONST:s[0-9]+]], 0x7fffffff
+; SI: V_BFI_B32 [[RESULT:v[0-9]+]], [[SCONST]], [[VMAG]], [[VSIGN]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BFI_INT
+define void @test_copysign_f32(float addrspace(1)* %out, float %mag, float %sign) nounwind {
+ %result = call float @llvm.copysign.f32(float %mag, float %sign)
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copysign_v2f32:
+; SI: S_ENDPGM
+
+; EG: BFI_INT
+; EG: BFI_INT
+define void @test_copysign_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %mag, <2 x float> %sign) nounwind {
+ %result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> %sign)
+ store <2 x float> %result, <2 x float> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_copysign_v4f32:
+; SI: S_ENDPGM
+
+; EG: BFI_INT
+; EG: BFI_INT
+; EG: BFI_INT
+; EG: BFI_INT
+define void @test_copysign_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %mag, <4 x float> %sign) nounwind {
+ %result = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
+ store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
diff --git a/test/CodeGen/R600/fcopysign.f64.ll b/test/CodeGen/R600/fcopysign.f64.ll
new file mode 100644
index 0000000..ea7a6db
--- /dev/null
+++ b/test/CodeGen/R600/fcopysign.f64.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare double @llvm.copysign.f64(double, double) nounwind readnone
+declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) nounwind readnone
+declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>) nounwind readnone
+
+; FUNC-LABEL: @test_copysign_f64:
+; SI-DAG: S_LOAD_DWORDX2 s{{\[}}[[SSIGN_LO:[0-9]+]]:[[SSIGN_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI: V_MOV_B32_e32 v[[VSIGN_HI:[0-9]+]], s[[SSIGN_HI]]
+; SI-DAG: S_LOAD_DWORDX2 s{{\[}}[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: V_MOV_B32_e32 v[[VMAG_HI:[0-9]+]], s[[SMAG_HI]]
+; SI-DAG: S_MOV_B32 [[SCONST:s[0-9]+]], 0x7fffffff
+; SI: V_BFI_B32 v[[VRESULT_HI:[0-9]+]], [[SCONST]], v[[VMAG_HI]], v[[VSIGN_HI]]
+; SI: V_MOV_B32_e32 v[[VMAG_LO:[0-9]+]], s[[SMAG_LO]]
+; SI: BUFFER_STORE_DWORDX2 v{{\[}}[[VMAG_LO]]:[[VRESULT_HI]]{{\]}}
+; SI: S_ENDPGM
+define void @test_copysign_f64(double addrspace(1)* %out, double %mag, double %sign) nounwind {
+ %result = call double @llvm.copysign.f64(double %mag, double %sign)
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_copysign_v2f64:
+; SI: S_ENDPGM
+define void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %mag, <2 x double> %sign) nounwind {
+ %result = call <2 x double> @llvm.copysign.v2f64(<2 x double> %mag, <2 x double> %sign)
+ store <2 x double> %result, <2 x double> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_copysign_v4f64:
+; SI: S_ENDPGM
+define void @test_copysign_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %mag, <4 x double> %sign) nounwind {
+ %result = call <4 x double> @llvm.copysign.v4f64(<4 x double> %mag, <4 x double> %sign)
+ store <4 x double> %result, <4 x double> addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/ffloor.ll b/test/CodeGen/R600/ffloor.ll
index 51d2b89..31c6116 100644
--- a/test/CodeGen/R600/ffloor.ll
+++ b/test/CodeGen/R600/ffloor.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI %s
+; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare double @llvm.floor.f64(double) nounwind readnone
declare <2 x double> @llvm.floor.v2f64(<2 x double>) nounwind readnone
@@ -7,15 +8,34 @@ declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
declare <8 x double> @llvm.floor.v8f64(<8 x double>) nounwind readnone
declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
-; CI-LABEL: @ffloor_f64:
+; FUNC-LABEL: @ffloor_f64:
; CI: V_FLOOR_F64_e32
+
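+; As with ceil, SI lacks V_FLOOR_F64; the expansion below presumably forms
+; trunc(x) via exponent masking and conditionally adds -1.0 when the input
+; was negative and inexact (note CMP_LT_F64 instead of CMP_GT_F64).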
+; SI: S_BFE_I32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
+; SI: S_ADD_I32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
+; SI: S_LSHR_B64
+; SI: S_NOT_B64
+; SI: S_AND_B64
+; SI: S_AND_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
+; SI: CMP_LT_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: CMP_GT_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: CMP_LT_F64
+; SI: CNDMASK_B32
+; SI: CMP_NE_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: V_ADD_F64
define void @ffloor_f64(double addrspace(1)* %out, double %x) {
%y = call double @llvm.floor.f64(double %x) nounwind readnone
store double %y, double addrspace(1)* %out
ret void
}
-; CI-LABEL: @ffloor_v2f64:
+; FUNC-LABEL: @ffloor_v2f64:
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
@@ -24,7 +44,7 @@ define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
ret void
}
-; FIXME-CI-LABEL: @ffloor_v3f64:
+; FIXME-FUNC-LABEL: @ffloor_v3f64:
; FIXME-CI: V_FLOOR_F64_e32
; FIXME-CI: V_FLOOR_F64_e32
; FIXME-CI: V_FLOOR_F64_e32
@@ -34,7 +54,7 @@ define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
; ret void
; }
-; CI-LABEL: @ffloor_v4f64:
+; FUNC-LABEL: @ffloor_v4f64:
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
@@ -45,7 +65,7 @@ define void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
ret void
}
-; CI-LABEL: @ffloor_v8f64:
+; FUNC-LABEL: @ffloor_v8f64:
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
@@ -60,7 +80,7 @@ define void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
ret void
}
-; CI-LABEL: @ffloor_v16f64:
+; FUNC-LABEL: @ffloor_v16f64:
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
; CI: V_FLOOR_F64_e32
diff --git a/test/CodeGen/R600/fma.ll b/test/CodeGen/R600/fma.ll
index 51e9d29..d72ffec 100644
--- a/test/CodeGen/R600/fma.ll
+++ b/test/CodeGen/R600/fma.ll
@@ -1,8 +1,15 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; CHECK: @fma_f32
-; CHECK: V_FMA_F32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
+declare float @llvm.fma.f32(float, float, float) nounwind readnone
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
+declare double @llvm.fma.f64(double, double, double) nounwind readnone
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
+declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
+
+; FUNC-LABEL: @fma_f32
+; SI: V_FMA_F32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2, float addrspace(1)* %in3) {
%r0 = load float addrspace(1)* %in1
@@ -13,11 +20,36 @@ define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
ret void
}
-declare float @llvm.fma.f32(float, float, float)
+; FUNC-LABEL: @fma_v2f32
+; SI: V_FMA_F32
+; SI: V_FMA_F32
+define void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
+ <2 x float> addrspace(1)* %in2, <2 x float> addrspace(1)* %in3) {
+ %r0 = load <2 x float> addrspace(1)* %in1
+ %r1 = load <2 x float> addrspace(1)* %in2
+ %r2 = load <2 x float> addrspace(1)* %in3
+ %r3 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %r0, <2 x float> %r1, <2 x float> %r2)
+ store <2 x float> %r3, <2 x float> addrspace(1)* %out
+ ret void
+}
-; CHECK: @fma_f64
-; CHECK: V_FMA_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
+; FUNC-LABEL: @fma_v4f32
+; SI: V_FMA_F32
+; SI: V_FMA_F32
+; SI: V_FMA_F32
+; SI: V_FMA_F32
+define void @fma_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
+ <4 x float> addrspace(1)* %in2, <4 x float> addrspace(1)* %in3) {
+ %r0 = load <4 x float> addrspace(1)* %in1
+ %r1 = load <4 x float> addrspace(1)* %in2
+ %r2 = load <4 x float> addrspace(1)* %in3
+ %r3 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %r0, <4 x float> %r1, <4 x float> %r2)
+ store <4 x float> %r3, <4 x float> addrspace(1)* %out
+ ret void
+}
+; FUNC-LABEL: @fma_f64
+; SI: V_FMA_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2, double addrspace(1)* %in3) {
%r0 = load double addrspace(1)* %in1
@@ -28,4 +60,30 @@ define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-declare double @llvm.fma.f64(double, double, double)
+; FUNC-LABEL: @fma_v2f64
+; SI: V_FMA_F64
+; SI: V_FMA_F64
+define void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
+ <2 x double> addrspace(1)* %in2, <2 x double> addrspace(1)* %in3) {
+ %r0 = load <2 x double> addrspace(1)* %in1
+ %r1 = load <2 x double> addrspace(1)* %in2
+ %r2 = load <2 x double> addrspace(1)* %in3
+ %r3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %r0, <2 x double> %r1, <2 x double> %r2)
+ store <2 x double> %r3, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fma_v4f64
+; SI: V_FMA_F64
+; SI: V_FMA_F64
+; SI: V_FMA_F64
+; SI: V_FMA_F64
+define void @fma_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in1,
+ <4 x double> addrspace(1)* %in2, <4 x double> addrspace(1)* %in3) {
+ %r0 = load <4 x double> addrspace(1)* %in1
+ %r1 = load <4 x double> addrspace(1)* %in2
+ %r2 = load <4 x double> addrspace(1)* %in3
+ %r3 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %r0, <4 x double> %r1, <4 x double> %r2)
+ store <4 x double> %r3, <4 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fnearbyint.ll b/test/CodeGen/R600/fnearbyint.ll
new file mode 100644
index 0000000..1c1d731
--- /dev/null
+++ b/test/CodeGen/R600/fnearbyint.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s
+; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s
+
+; This should have exactly the same output as the test for rint,
+; so no need to check anything.
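+; (nearbyint differs from rint only in that it never raises the inexact
+; exception, so both should lower identically here.)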
+
+declare float @llvm.nearbyint.f32(float) #0
+declare <2 x float> @llvm.nearbyint.v2f32(<2 x float>) #0
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) #0
+declare double @llvm.nearbyint.f64(double) #0
+declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) #0
+declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>) #0
+
+
+define void @fnearbyint_f32(float addrspace(1)* %out, float %in) #1 {
+entry:
+ %0 = call float @llvm.nearbyint.f32(float %in)
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+define void @fnearbyint_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+entry:
+ %0 = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %in)
+ store <2 x float> %0, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+define void @fnearbyint_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #1 {
+entry:
+ %0 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %in)
+ store <4 x float> %0, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+define void @nearbyint_f64(double addrspace(1)* %out, double %in) {
+entry:
+ %0 = call double @llvm.nearbyint.f64(double %in)
+ store double %0, double addrspace(1)* %out
+ ret void
+}
+define void @nearbyint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+entry:
+ %0 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %in)
+ store <2 x double> %0, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+define void @nearbyint_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+entry:
+ %0 = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %in)
+ store <4 x double> %0, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind readonly }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/R600/fp16_to_fp32.ll b/test/CodeGen/R600/fp16_to_fp32.ll
new file mode 100644
index 0000000..fa2e379
--- /dev/null
+++ b/test/CodeGen/R600/fp16_to_fp32.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.convert.from.fp16(i16) nounwind readnone
+
+; SI-LABEL: @test_convert_fp16_to_fp32:
+; SI: BUFFER_LOAD_USHORT [[VAL:v[0-9]+]]
+; SI: V_CVT_F32_F16_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_DWORD [[RESULT]]
+define void @test_convert_fp16_to_fp32(float addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %val = load i16 addrspace(1)* %in, align 2
+ %cvt = call float @llvm.convert.from.fp16(i16 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/fp32_to_fp16.ll b/test/CodeGen/R600/fp32_to_fp16.ll
new file mode 100644
index 0000000..9997cd3
--- /dev/null
+++ b/test/CodeGen/R600/fp32_to_fp16.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare i16 @llvm.convert.to.fp16(float) nounwind readnone
+
+; SI-LABEL: @test_convert_fp32_to_fp16:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]]
+; SI: V_CVT_F16_F32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+define void @test_convert_fp32_to_fp16(i16 addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %val = load float addrspace(1)* %in, align 4
+ %cvt = call i16 @llvm.convert.to.fp16(float %val) nounwind readnone
+ store i16 %cvt, i16 addrspace(1)* %out, align 2
+ ret void
+}
diff --git a/test/CodeGen/R600/fp_to_sint_i64.ll b/test/CodeGen/R600/fp_to_sint_i64.ll
new file mode 100644
index 0000000..ec3e198
--- /dev/null
+++ b/test/CodeGen/R600/fp_to_sint_i64.ll
@@ -0,0 +1,12 @@
+; FIXME: Merge into fp_to_sint.ll when EG/NI supports 64-bit types
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI %s
+
+; SI-LABEL: @fp_to_sint_i64
+; Check that the compiler doesn't crash with a "cannot select" error
+; SI: S_ENDPGM
+define void @fp_to_sint_i64(i64 addrspace(1)* %out, float %in) {
+entry:
+ %0 = fptosi float %in to i64
+ store i64 %0, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fsub64.ll b/test/CodeGen/R600/fsub64.ll
index 1445a20..f5e5708 100644
--- a/test/CodeGen/R600/fsub64.ll
+++ b/test/CodeGen/R600/fsub64.ll
@@ -1,8 +1,7 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
-
-; CHECK: @fsub_f64
-; CHECK: V_ADD_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}, 0, 0, 0, 0, 2
+; RUN: llc -march=r600 -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; SI-LABEL: @fsub_f64:
+; SI: V_ADD_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
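+; (there is no V_SUB_F64; fsub should instead select V_ADD_F64 with the neg
+; source modifier applied to the second operand, hence the negated register
+; range in the check above.)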
define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double addrspace(1)* %in1
diff --git a/test/CodeGen/R600/ftrunc.ll b/test/CodeGen/R600/ftrunc.ll
index 6b235ff..0d7d467 100644
--- a/test/CodeGen/R600/ftrunc.ll
+++ b/test/CodeGen/R600/ftrunc.ll
@@ -1,84 +1,119 @@
-; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG --check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI --check-prefix=FUNC %s
-declare double @llvm.trunc.f64(double) nounwind readnone
-declare <2 x double> @llvm.trunc.v2f64(<2 x double>) nounwind readnone
-declare <3 x double> @llvm.trunc.v3f64(<3 x double>) nounwind readnone
-declare <4 x double> @llvm.trunc.v4f64(<4 x double>) nounwind readnone
-declare <8 x double> @llvm.trunc.v8f64(<8 x double>) nounwind readnone
-declare <16 x double> @llvm.trunc.v16f64(<16 x double>) nounwind readnone
+declare float @llvm.trunc.f32(float) nounwind readnone
+declare <2 x float> @llvm.trunc.v2f32(<2 x float>) nounwind readnone
+declare <3 x float> @llvm.trunc.v3f32(<3 x float>) nounwind readnone
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
+declare <8 x float> @llvm.trunc.v8f32(<8 x float>) nounwind readnone
+declare <16 x float> @llvm.trunc.v16f32(<16 x float>) nounwind readnone
-; CI-LABEL: @ftrunc_f64:
-; CI: V_TRUNC_F64_e32
-define void @ftrunc_f64(double addrspace(1)* %out, double %x) {
- %y = call double @llvm.trunc.f64(double %x) nounwind readnone
- store double %y, double addrspace(1)* %out
+; FUNC-LABEL: @ftrunc_f32:
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_f32(float addrspace(1)* %out, float %x) {
+ %y = call float @llvm.trunc.f32(float %x) nounwind readnone
+ store float %y, float addrspace(1)* %out
ret void
}
-; CI-LABEL: @ftrunc_v2f64:
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-define void @ftrunc_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
- %y = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) nounwind readnone
- store <2 x double> %y, <2 x double> addrspace(1)* %out
+; FUNC-LABEL: @ftrunc_v2f32:
+; EG: TRUNC
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
+ %y = call <2 x float> @llvm.trunc.v2f32(<2 x float> %x) nounwind readnone
+ store <2 x float> %y, <2 x float> addrspace(1)* %out
ret void
}
-; FIXME-CI-LABEL: @ftrunc_v3f64:
-; FIXME-CI: V_TRUNC_F64_e32
-; FIXME-CI: V_TRUNC_F64_e32
-; FIXME-CI: V_TRUNC_F64_e32
-; define void @ftrunc_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
-; %y = call <3 x double> @llvm.trunc.v3f64(<3 x double> %x) nounwind readnone
-; store <3 x double> %y, <3 x double> addrspace(1)* %out
+; FIXME-FUNC-LABEL: @ftrunc_v3f32:
+; FIXME-EG: TRUNC
+; FIXME-EG: TRUNC
+; FIXME-EG: TRUNC
+; FIXME-SI: V_TRUNC_F32_e32
+; FIXME-SI: V_TRUNC_F32_e32
+; FIXME-SI: V_TRUNC_F32_e32
+; define void @ftrunc_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
+; %y = call <3 x float> @llvm.trunc.v3f32(<3 x float> %x) nounwind readnone
+; store <3 x float> %y, <3 x float> addrspace(1)* %out
; ret void
; }
-; CI-LABEL: @ftrunc_v4f64:
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-define void @ftrunc_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
- %y = call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) nounwind readnone
- store <4 x double> %y, <4 x double> addrspace(1)* %out
+; FUNC-LABEL: @ftrunc_v4f32:
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
+ %y = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) nounwind readnone
+ store <4 x float> %y, <4 x float> addrspace(1)* %out
ret void
}
-; CI-LABEL: @ftrunc_v8f64:
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-define void @ftrunc_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
- %y = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) nounwind readnone
- store <8 x double> %y, <8 x double> addrspace(1)* %out
+; FUNC-LABEL: @ftrunc_v8f32:
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
+ %y = call <8 x float> @llvm.trunc.v8f32(<8 x float> %x) nounwind readnone
+ store <8 x float> %y, <8 x float> addrspace(1)* %out
ret void
}
-; CI-LABEL: @ftrunc_v16f64:
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-; CI: V_TRUNC_F64_e32
-define void @ftrunc_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
- %y = call <16 x double> @llvm.trunc.v16f64(<16 x double> %x) nounwind readnone
- store <16 x double> %y, <16 x double> addrspace(1)* %out
+; FUNC-LABEL: @ftrunc_v16f32:
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %x) {
+ %y = call <16 x float> @llvm.trunc.v16f32(<16 x float> %x) nounwind readnone
+ store <16 x float> %y, <16 x float> addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/R600/gv-const-addrspace.ll b/test/CodeGen/R600/gv-const-addrspace.ll
index 0176061..db64a6f 100644
--- a/test/CodeGen/R600/gv-const-addrspace.ll
+++ b/test/CodeGen/R600/gv-const-addrspace.ll
@@ -6,7 +6,7 @@
; XXX: Test on SI once 64-bit adds are supported.
-@float_gv = internal addrspace(2) unnamed_addr constant [5 x float] [float 0.0, float 1.0, float 2.0, float 3.0, float 4.0], align 4
+@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.0, float 1.0, float 2.0, float 3.0, float 4.0], align 4
; FUNC-LABEL: @float
@@ -25,7 +25,7 @@ entry:
ret void
}
-@i32_gv = internal addrspace(2) unnamed_addr constant [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4], align 4
+@i32_gv = internal unnamed_addr addrspace(2) constant [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4], align 4
; FUNC-LABEL: @i32
@@ -47,7 +47,7 @@ entry:
%struct.foo = type { float, [5 x i32] }
-@struct_foo_gv = internal addrspace(2) unnamed_addr constant [1 x %struct.foo] [ %struct.foo { float 16.0, [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4] } ]
+@struct_foo_gv = internal unnamed_addr addrspace(2) constant [1 x %struct.foo] [ %struct.foo { float 16.0, [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4] } ]
; FUNC-LABEL: @struct_foo_gv_load
diff --git a/test/CodeGen/R600/indirect-private-64.ll b/test/CodeGen/R600/indirect-private-64.ll
index 4d1f734..b127b7e 100644
--- a/test/CodeGen/R600/indirect-private-64.ll
+++ b/test/CodeGen/R600/indirect-private-64.ll
@@ -3,10 +3,8 @@
declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind
; SI-LABEL: @private_access_f64_alloca:
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELS_B32_e32
-; SI: V_MOVRELS_B32_e32
+; SI: DS_WRITE_B64
+; SI: DS_READ_B64
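+; (the private alloca appears to be promoted to local (LDS) memory now, so
+; the indirect access uses DS_WRITE/DS_READ instead of V_MOVREL indexing.)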
define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load double addrspace(1)* %in, align 8
%array = alloca double, i32 16, align 8
@@ -19,14 +17,10 @@ define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double
}
; SI-LABEL: @private_access_v2f64_alloca:
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELS_B32_e32
-; SI: V_MOVRELS_B32_e32
-; SI: V_MOVRELS_B32_e32
-; SI: V_MOVRELS_B32_e32
+; SI: DS_WRITE_B64
+; SI: DS_WRITE_B64
+; SI: DS_READ_B64
+; SI: DS_READ_B64
define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load <2 x double> addrspace(1)* %in, align 16
%array = alloca <2 x double>, i32 16, align 16
@@ -39,10 +33,8 @@ define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out
}
; SI-LABEL: @private_access_i64_alloca:
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELS_B32_e32
-; SI: V_MOVRELS_B32_e32
+; SI: DS_WRITE_B64
+; SI: DS_READ_B64
define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load i64 addrspace(1)* %in, align 8
%array = alloca i64, i32 16, align 8
@@ -55,14 +47,10 @@ define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrs
}
; SI-LABEL: @private_access_v2i64_alloca:
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELD_B32_e32
-; SI: V_MOVRELS_B32_e32
-; SI: V_MOVRELS_B32_e32
-; SI: V_MOVRELS_B32_e32
-; SI: V_MOVRELS_B32_e32
+; SI: DS_WRITE_B64
+; SI: DS_WRITE_B64
+; SI: DS_READ_B64
+; SI: DS_READ_B64
define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load <2 x i64> addrspace(1)* %in, align 16
%array = alloca <2 x i64>, i32 16, align 16
diff --git a/test/CodeGen/R600/input-mods.ll b/test/CodeGen/R600/input-mods.ll
new file mode 100644
index 0000000..13bfbab
--- /dev/null
+++ b/test/CodeGen/R600/input-mods.ll
@@ -0,0 +1,26 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
+
+;EG-CHECK-LABEL: @test
+;EG-CHECK: EXP_IEEE *
+;CM-CHECK-LABEL: @test
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.X, -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Y (MASKED), -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Z (MASKED), -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE * T{{[0-9]+}}.W (MASKED), -|T{{[0-9]+}}.X|
+
+define void @test(<4 x float> inreg %reg0) #0 {
+ %r0 = extractelement <4 x float> %reg0, i32 0
+ %r1 = call float @llvm.fabs.f32(float %r0)
+ %r2 = fsub float -0.000000e+00, %r1
+ %r3 = call float @llvm.exp2.f32(float %r2)
+ %vec = insertelement <4 x float> undef, float %r3, i32 0
+ call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
+ ret void
+}
+
+declare float @llvm.exp2.f32(float) readnone
+declare float @llvm.fabs.f32(float) readnone
+declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/large-alloca.ll b/test/CodeGen/R600/large-alloca.ll
new file mode 100644
index 0000000..d8be6d4
--- /dev/null
+++ b/test/CodeGen/R600/large-alloca.ll
@@ -0,0 +1,14 @@
+; XFAIL: *
+; REQUIRES: asserts
+; RUN: llc -march=r600 -mcpu=SI < %s
+
+define void @large_alloca(i32 addrspace(1)* %out, i32 %x, i32 %y) nounwind {
+ %large = alloca [8192 x i32], align 4
+ %gep = getelementptr [8192 x i32]* %large, i32 0, i32 8191
+ store i32 %x, i32* %gep
+ %gep1 = getelementptr [8192 x i32]* %large, i32 0, i32 %y
+ %0 = load i32* %gep1
+ store i32 %0, i32 addrspace(1)* %out
+ ret void
+}
+
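; Size note (not part of this patch): [8192 x i32] is 8192 x 4 B = 32768 B
; = 32 KiB of private (scratch) memory per work-item, and %gep1 indexes it
; with the runtime value %y, so the access cannot be folded to a constant
; offset. With no FileCheck lines, the test only asserts that llc succeeds
; (currently XFAIL'd).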
diff --git a/test/CodeGen/R600/large-constant-initializer.ll b/test/CodeGen/R600/large-constant-initializer.ll
new file mode 100644
index 0000000..552cd05
--- /dev/null
+++ b/test/CodeGen/R600/large-constant-initializer.ll
@@ -0,0 +1,19 @@
+; XFAIL: *
+; REQUIRES: asserts
+; RUN: llc -march=r600 -mcpu=SI < %s
+
+@gv = external unnamed_addr addrspace(2) constant [239 x i32], align 4
+
+define void @opencv_cvtfloat_crash(i32 addrspace(1)* %out, i32 %x) nounwind {
+ %val = load i32 addrspace(2)* getelementptr ([239 x i32] addrspace(2)* @gv, i64 0, i64 239), align 4
+ %mul12 = mul nsw i32 %val, 7
+ br i1 undef, label %exit, label %bb
+
+bb:
+ %cmp = icmp slt i32 %x, 0
+ br label %exit
+
+exit:
+ ret void
+}
+
diff --git a/test/CodeGen/R600/lds-output-queue.ll b/test/CodeGen/R600/lds-output-queue.ll
index af0db0d..d5dc061 100644
--- a/test/CodeGen/R600/lds-output-queue.ll
+++ b/test/CodeGen/R600/lds-output-queue.ll
@@ -8,7 +8,7 @@
; CHECK-NOT: ALU clause
; CHECK: MOV * T{{[0-9]\.[XYZW]}}, OQAP
-@local_mem = internal addrspace(3) unnamed_addr global [2 x i32] [i32 1, i32 2], align 4
+@local_mem = internal unnamed_addr addrspace(3) global [2 x i32] [i32 1, i32 2], align 4
define void @lds_input_queue(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %index) {
entry:
diff --git a/test/CodeGen/R600/lds-size.ll b/test/CodeGen/R600/lds-size.ll
index 2185180..9182e25 100644
--- a/test/CodeGen/R600/lds-size.ll
+++ b/test/CodeGen/R600/lds-size.ll
@@ -6,7 +6,7 @@
; CHECK-LABEL: @test
; CHECK: .long 166120
; CHECK-NEXT: .long 1
-@lds = internal addrspace(3) unnamed_addr global i32 zeroinitializer, align 4
+@lds = internal unnamed_addr addrspace(3) global i32 zeroinitializer, align 4
define void @test(i32 addrspace(1)* %out, i32 %cond) {
entry:
diff --git a/test/CodeGen/R600/lit.local.cfg b/test/CodeGen/R600/lit.local.cfg
index 2d8930a..ad9ce25 100644
--- a/test/CodeGen/R600/lit.local.cfg
+++ b/test/CodeGen/R600/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'R600' in targets:
+if not 'R600' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/R600/llvm.AMDGPU.abs.ll b/test/CodeGen/R600/llvm.AMDGPU.abs.ll
new file mode 100644
index 0000000..a0a47b7
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.abs.ll
@@ -0,0 +1,48 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.abs(i32) nounwind readnone
+
+; Legacy name
+declare i32 @llvm.AMDIL.abs.i32(i32) nounwind readnone
+
+; FUNC-LABEL: @s_abs_i32
+; SI: S_SUB_I32
+; SI: S_MAX_I32
+; SI: S_ENDPGM
+
+; EG: SUB_INT
+; EG: MAX_INT
+define void @s_abs_i32(i32 addrspace(1)* %out, i32 %src) nounwind {
+ %abs = call i32 @llvm.AMDGPU.abs(i32 %src) nounwind readnone
+ store i32 %abs, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_abs_i32
+; SI: V_SUB_I32_e32
+; SI: V_MAX_I32_e32
+; SI: S_ENDPGM
+
+; EG: SUB_INT
+; EG: MAX_INT
+define void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
+ %val = load i32 addrspace(1)* %src, align 4
+ %abs = call i32 @llvm.AMDGPU.abs(i32 %val) nounwind readnone
+ store i32 %abs, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @abs_i32_legacy_amdil
+; SI: V_SUB_I32_e32
+; SI: V_MAX_I32_e32
+; SI: S_ENDPGM
+
+; EG: SUB_INT
+; EG: MAX_INT
+define void @abs_i32_legacy_amdil(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
+ %val = load i32 addrspace(1)* %src, align 4
+ %abs = call i32 @llvm.AMDIL.abs.i32(i32 %val) nounwind readnone
+ store i32 %abs, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.brev.ll b/test/CodeGen/R600/llvm.AMDGPU.brev.ll
new file mode 100644
index 0000000..68a5ad0
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.brev.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.brev(i32) nounwind readnone
+
+; FUNC-LABEL: @s_brev_i32:
+; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
+; SI: S_BREV_B32 [[SRESULT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+define void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+ %brev = call i32 @llvm.AMDGPU.brev(i32 %val) nounwind readnone
+ store i32 %brev, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_brev_i32:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_BFREV_B32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %val = load i32 addrspace(1)* %valptr, align 4
+ %brev = call i32 @llvm.AMDGPU.brev(i32 %val) nounwind readnone
+ store i32 %brev, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.clamp.ll b/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
new file mode 100644
index 0000000..d608953
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
@@ -0,0 +1,28 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.clamp.f32(float, float, float) nounwind readnone
+declare float @llvm.AMDIL.clamp.f32(float, float, float) nounwind readnone
+
+; FUNC-LABEL: @clamp_0_1_f32
+; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
+; SI: V_ADD_F32_e64 [[RESULT:v[0-9]+]], [[ARG]], 0, 1, 0
+; SI: BUFFER_STORE_DWORD [[RESULT]]
+; SI: S_ENDPGM
+
+; EG: MOV_SAT
+define void @clamp_0_1_f32(float addrspace(1)* %out, float %src) nounwind {
+ %clamp = call float @llvm.AMDGPU.clamp.f32(float %src, float 0.0, float 1.0) nounwind readnone
+ store float %clamp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @clamp_0_1_amdil_legacy_f32
+; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
+; SI: V_ADD_F32_e64 [[RESULT:v[0-9]+]], [[ARG]], 0, 1, 0
+; SI: BUFFER_STORE_DWORD [[RESULT]]
+define void @clamp_0_1_amdil_legacy_f32(float addrspace(1)* %out, float %src) nounwind {
+ %clamp = call float @llvm.AMDIL.clamp.f32(float %src, float 0.0, float 1.0) nounwind readnone
+ store float %clamp, float addrspace(1)* %out, align 4
+ ret void
+}
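; Reading note (interpretation, not part of this patch): in the checked
; "V_ADD_F32_e64 [[RESULT]], [[ARG]], 0, 1, 0" the trailing operands are
; src1 = 0, clamp = 1, omod = 0, i.e. the clamp to [0.0, 1.0] is folded
; into the add's VOP3 clamp modifier rather than emitted as separate
; min/max instructions.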
diff --git a/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll b/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
new file mode 100644
index 0000000..6facb47
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
@@ -0,0 +1,42 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.AMDGPU.cvt.f32.ubyte0(i32) nounwind readnone
+declare float @llvm.AMDGPU.cvt.f32.ubyte1(i32) nounwind readnone
+declare float @llvm.AMDGPU.cvt.f32.ubyte2(i32) nounwind readnone
+declare float @llvm.AMDGPU.cvt.f32.ubyte3(i32) nounwind readnone
+
+; SI-LABEL: @test_unpack_byte0_to_float:
+; SI: V_CVT_F32_UBYTE0
+define void @test_unpack_byte0_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte0(i32 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_unpack_byte1_to_float:
+; SI: V_CVT_F32_UBYTE1
+define void @test_unpack_byte1_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte1(i32 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_unpack_byte2_to_float:
+; SI: V_CVT_F32_UBYTE2
+define void @test_unpack_byte2_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte2(i32 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_unpack_byte3_to_float:
+; SI: V_CVT_F32_UBYTE3
+define void @test_unpack_byte3_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte3(i32 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
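; Hypothetical usage sketch (not part of this patch): unpacking all four
; bytes of a packed RGBA8 dword, one V_CVT_F32_UBYTE[0-3] per channel,
; using the intrinsics declared above.
define void @unpack_rgba8(<4 x float> addrspace(1)* %out, i32 %rgba) nounwind {
  %r = call float @llvm.AMDGPU.cvt.f32.ubyte0(i32 %rgba) nounwind readnone
  %g = call float @llvm.AMDGPU.cvt.f32.ubyte1(i32 %rgba) nounwind readnone
  %b = call float @llvm.AMDGPU.cvt.f32.ubyte2(i32 %rgba) nounwind readnone
  %a = call float @llvm.AMDGPU.cvt.f32.ubyte3(i32 %rgba) nounwind readnone
  %v0 = insertelement <4 x float> undef, float %r, i32 0
  %v1 = insertelement <4 x float> %v0, float %g, i32 1
  %v2 = insertelement <4 x float> %v1, float %b, i32 2
  %v3 = insertelement <4 x float> %v2, float %a, i32 3
  store <4 x float> %v3, <4 x float> addrspace(1)* %out, align 16
  ret void
}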
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
new file mode 100644
index 0000000..c8c7357
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.AMDGPU.div.fixup.f32(float, float, float) nounwind readnone
+declare double @llvm.AMDGPU.div.fixup.f64(double, double, double) nounwind readnone
+
+; SI-LABEL: @test_div_fixup_f32:
+; SI-DAG: S_LOAD_DWORD [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: S_LOAD_DWORD [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: V_MOV_B32_e32 [[VC:v[0-9]+]], [[SC]]
+; SI-DAG: S_LOAD_DWORD [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: V_MOV_B32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: V_DIV_FIXUP_F32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @test_div_fixup_f32(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+ %result = call float @llvm.AMDGPU.div.fixup.f32(float %a, float %b, float %c) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_div_fixup_f64:
+; SI: V_DIV_FIXUP_F64
+define void @test_div_fixup_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
+ %result = call double @llvm.AMDGPU.div.fixup.f64(double %a, double %b, double %c) nounwind readnone
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
new file mode 100644
index 0000000..4f1e827
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.AMDGPU.div.fmas.f32(float, float, float) nounwind readnone
+declare double @llvm.AMDGPU.div.fmas.f64(double, double, double) nounwind readnone
+
+; SI-LABEL: @test_div_fmas_f32:
+; SI-DAG: S_LOAD_DWORD [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: S_LOAD_DWORD [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: V_MOV_B32_e32 [[VC:v[0-9]+]], [[SC]]
+; SI-DAG: S_LOAD_DWORD [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: V_MOV_B32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: V_DIV_FMAS_F32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_div_fmas_f64:
+; SI: V_DIV_FMAS_F64
+define void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
+ %result = call double @llvm.AMDGPU.div.fmas.f64(double %a, double %b, double %c) nounwind readnone
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll b/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
new file mode 100644
index 0000000..527c8da
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
@@ -0,0 +1,48 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare { float, i1 } @llvm.AMDGPU.div.scale.f32(float, float, i1) nounwind readnone
+declare { double, i1 } @llvm.AMDGPU.div.scale.f64(double, double, i1) nounwind readnone
+
+; SI-LABEL: @test_div_scale_f32_1:
+; SI: V_DIV_SCALE_F32
+define void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr) nounwind {
+ %a = load float addrspace(1)* %aptr, align 4
+ %b = load float addrspace(1)* %bptr, align 4
+ %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
+ %result0 = extractvalue { float, i1 } %result, 0
+ store float %result0, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_div_scale_f32_2:
+; SI: V_DIV_SCALE_F32
+define void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr) nounwind {
+ %a = load float addrspace(1)* %aptr, align 4
+ %b = load float addrspace(1)* %bptr, align 4
+ %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
+ %result0 = extractvalue { float, i1 } %result, 0
+ store float %result0, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_div_scale_f64_1:
+; SI: V_DIV_SCALE_F64
+define void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %bptr, double addrspace(1)* %cptr) nounwind {
+ %a = load double addrspace(1)* %aptr, align 8
+ %b = load double addrspace(1)* %bptr, align 8
+ %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
+ %result0 = extractvalue { double, i1 } %result, 0
+ store double %result0, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @test_div_scale_f64_2:
+; SI: V_DIV_SCALE_F64
+define void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %bptr, double addrspace(1)* %cptr) nounwind {
+ %a = load double addrspace(1)* %aptr, align 8
+ %b = load double addrspace(1)* %bptr, align 8
+ %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
+ %result0 = extractvalue { double, i1 } %result, 0
+ store double %result0, double addrspace(1)* %out, align 8
+ ret void
+}
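; Minimal hypothetical extension (not part of this patch): the tests above
; only read element 0 of the { float, i1 } result; the boolean scale flag
; in element 1 can be extracted and stored the same way.
define void @test_div_scale_f32_flag(i1 addrspace(1)* %out, float %a, float %b) nounwind {
  %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
  %flag = extractvalue { float, i1 } %result, 1
  store i1 %flag, i1 addrspace(1)* %out
  ret void
}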
diff --git a/test/CodeGen/R600/llvm.AMDGPU.fract.ll b/test/CodeGen/R600/llvm.AMDGPU.fract.ll
new file mode 100644
index 0000000..72ec1c5
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.fract.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.fract.f32(float) nounwind readnone
+
+; Legacy name
+declare float @llvm.AMDIL.fraction.f32(float) nounwind readnone
+
+; FUNC-LABEL: @fract_f32
+; SI: V_FRACT_F32
+; EG: FRACT
+define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
+ %val = load float addrspace(1)* %src, align 4
+ %fract = call float @llvm.AMDGPU.fract.f32(float %val) nounwind readnone
+ store float %fract, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @fract_f32_legacy_amdil
+; SI: V_FRACT_F32
+; EG: FRACT
+define void @fract_f32_legacy_amdil(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
+ %val = load float addrspace(1)* %src, align 4
+ %fract = call float @llvm.AMDIL.fraction.f32(float %val) nounwind readnone
+ store float %fract, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll b/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
new file mode 100644
index 0000000..51964ee
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
@@ -0,0 +1,13 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.legacy.rsq(float) nounwind readnone
+
+; FUNC-LABEL: @rsq_legacy_f32
+; SI: V_RSQ_LEGACY_F32_e32
+; EG: RECIPSQRT_IEEE
+define void @rsq_legacy_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rsq = call float @llvm.AMDGPU.legacy.rsq(float %src) nounwind readnone
+ store float %rsq, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rcp.ll b/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
new file mode 100644
index 0000000..ca5260d
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.rcp.f32(float) nounwind readnone
+declare double @llvm.AMDGPU.rcp.f64(double) nounwind readnone
+
+
+declare float @llvm.sqrt.f32(float) nounwind readnone
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+; FUNC-LABEL: @rcp_f32
+; SI: V_RCP_F32_e32
+define void @rcp_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rcp = call float @llvm.AMDGPU.rcp.f32(float %src) nounwind readnone
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @rcp_f64
+; SI: V_RCP_F64_e32
+define void @rcp_f64(double addrspace(1)* %out, double %src) nounwind {
+ %rcp = call double @llvm.AMDGPU.rcp.f64(double %src) nounwind readnone
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @rcp_pat_f32
+; SI: V_RCP_F32_e32
+define void @rcp_pat_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rcp = fdiv float 1.0, %src
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @rcp_pat_f64
+; SI: V_RCP_F64_e32
+define void @rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
+ %rcp = fdiv double 1.0, %src
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @rsq_rcp_pat_f32
+; SI: V_RSQ_F32_e32
+define void @rsq_rcp_pat_f32(float addrspace(1)* %out, float %src) nounwind {
+ %sqrt = call float @llvm.sqrt.f32(float %src) nounwind readnone
+ %rcp = call float @llvm.AMDGPU.rcp.f32(float %sqrt) nounwind readnone
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @rsq_rcp_pat_f64
+; SI: V_RSQ_F64_e32
+define void @rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
+ %sqrt = call double @llvm.sqrt.f64(double %src) nounwind readnone
+ %rcp = call double @llvm.AMDGPU.rcp.f64(double %sqrt) nounwind readnone
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
new file mode 100644
index 0000000..100d6ff
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare double @llvm.AMDGPU.rsq.clamped.f64(double) nounwind readnone
+
+; FUNC-LABEL: @rsq_clamped_f64
+; SI: V_RSQ_CLAMP_F64_e32
+define void @rsq_clamped_f64(double addrspace(1)* %out, double %src) nounwind {
+ %rsq_clamped = call double @llvm.AMDGPU.rsq.clamped.f64(double %src) nounwind readnone
+ store double %rsq_clamped, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
new file mode 100644
index 0000000..683df73
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+
+declare float @llvm.AMDGPU.rsq.clamped.f32(float) nounwind readnone
+
+; FUNC-LABEL: @rsq_clamped_f32
+; SI: V_RSQ_CLAMP_F32_e32
+; EG: RECIPSQRT_CLAMPED
+define void @rsq_clamped_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rsq_clamped = call float @llvm.AMDGPU.rsq.clamped.f32(float %src) nounwind readnone
+ store float %rsq_clamped, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
new file mode 100644
index 0000000..27cf6b2
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
@@ -0,0 +1,13 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.rsq.f32(float) nounwind readnone
+
+; FUNC-LABEL: @rsq_f32
+; SI: V_RSQ_F32_e32
+; EG: RECIPSQRT_IEEE
+define void @rsq_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rsq = call float @llvm.AMDGPU.rsq.f32(float %src) nounwind readnone
+ store float %rsq, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll b/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
new file mode 100644
index 0000000..1c736d4
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare double @llvm.AMDGPU.trig.preop.f64(double, i32) nounwind readnone
+
+; SI-LABEL: @test_trig_preop_f64:
+; SI-DAG: BUFFER_LOAD_DWORD [[SEG:v[0-9]+]]
+; SI-DAG: BUFFER_LOAD_DWORDX2 [[SRC:v\[[0-9]+:[0-9]+\]]],
+; SI: V_TRIG_PREOP_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[SRC]], [[SEG]]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @test_trig_preop_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load double addrspace(1)* %aptr, align 8
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %result = call double @llvm.AMDGPU.trig.preop.f64(double %a, i32 %b) nounwind readnone
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @test_trig_preop_f64_imm_segment:
+; SI: BUFFER_LOAD_DWORDX2 [[SRC:v\[[0-9]+:[0-9]+\]]],
+; SI: V_TRIG_PREOP_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[SRC]], 7
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @test_trig_preop_f64_imm_segment(double addrspace(1)* %out, double addrspace(1)* %aptr) nounwind {
+ %a = load double addrspace(1)* %aptr, align 8
+ %result = call double @llvm.AMDGPU.trig.preop.f64(double %a, i32 7) nounwind readnone
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.SI.gather4.ll b/test/CodeGen/R600/llvm.SI.gather4.ll
new file mode 100644
index 0000000..8402faa
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.gather4.ll
@@ -0,0 +1,508 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: @gather4_v2
+;CHECK: IMAGE_GATHER4 {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_v2() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.v2i32(<2 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4
+;CHECK: IMAGE_GATHER4 {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_cl
+;CHECK: IMAGE_GATHER4_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.cl.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_l
+;CHECK: IMAGE_GATHER4_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_l() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.l.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b
+;CHECK: IMAGE_GATHER4_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_cl
+;CHECK: IMAGE_GATHER4_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.cl.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_cl_v8
+;CHECK: IMAGE_GATHER4_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_cl_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.cl.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_lz_v2
+;CHECK: IMAGE_GATHER4_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_lz_v2() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_lz
+;CHECK: IMAGE_GATHER4_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_lz() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.lz.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+
+;CHECK-LABEL: @gather4_o
+;CHECK: IMAGE_GATHER4_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_cl_o
+;CHECK: IMAGE_GATHER4_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_cl_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.cl.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_cl_o_v8
+;CHECK: IMAGE_GATHER4_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_cl_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.cl.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_l_o
+;CHECK: IMAGE_GATHER4_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_l_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.l.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_l_o_v8
+;CHECK: IMAGE_GATHER4_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_l_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.l.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_o
+;CHECK: IMAGE_GATHER4_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_o_v8
+;CHECK: IMAGE_GATHER4_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_cl_o
+;CHECK: IMAGE_GATHER4_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_cl_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.cl.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_lz_o
+;CHECK: IMAGE_GATHER4_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_lz_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.lz.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+
+;CHECK-LABEL: @gather4_c
+;CHECK: IMAGE_GATHER4_C {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_cl
+;CHECK: IMAGE_GATHER4_C_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.cl.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_cl_v8
+;CHECK: IMAGE_GATHER4_C_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_cl_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.cl.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_l
+;CHECK: IMAGE_GATHER4_C_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_l() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.l.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_l_v8
+;CHECK: IMAGE_GATHER4_C_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_l_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.l.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b
+;CHECK: IMAGE_GATHER4_C_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b_v8
+;CHECK: IMAGE_GATHER4_C_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b_cl
+;CHECK: IMAGE_GATHER4_C_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.cl.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_lz
+;CHECK: IMAGE_GATHER4_C_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_lz() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.lz.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+
+;CHECK-LABEL: @gather4_c_o
+;CHECK: IMAGE_GATHER4_C_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_o_v8
+;CHECK: IMAGE_GATHER4_C_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_cl_o
+;CHECK: IMAGE_GATHER4_C_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_cl_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.cl.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_l_o
+;CHECK: IMAGE_GATHER4_C_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_l_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.l.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b_o
+;CHECK: IMAGE_GATHER4_C_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b_cl_o
+;CHECK: IMAGE_GATHER4_C_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b_cl_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.cl.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_lz_o
+;CHECK: IMAGE_GATHER4_C_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_lz_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.lz.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_lz_o_v8
+;CHECK: IMAGE_GATHER4_C_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_lz_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.lz.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+
+declare <4 x float> @llvm.SI.gather4.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.cl.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.l.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.cl.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.cl.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.lz.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare <4 x float> @llvm.SI.gather4.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.cl.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.cl.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.l.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.l.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.cl.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.lz.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare <4 x float> @llvm.SI.gather4.c.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.cl.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.cl.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.l.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.l.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.cl.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.lz.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare <4 x float> @llvm.SI.gather4.c.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.cl.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.l.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.cl.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.lz.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.lz.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.SI.getlod.ll b/test/CodeGen/R600/llvm.SI.getlod.ll
new file mode 100644
index 0000000..a7a17ec
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.getlod.ll
@@ -0,0 +1,44 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: @getlod
+;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @getlod() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.getlod.i32(i32 undef, <32 x i8> undef, <16 x i8> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
+ ret void
+}
+
+;CHECK-LABEL: @getlod_v2
+;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @getlod_v2() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.getlod.v2i32(<2 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
+ ret void
+}
+
+;CHECK-LABEL: @getlod_v4
+;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @getlod_v4() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.getlod.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
+ ret void
+}
+
+
+declare <4 x float> @llvm.SI.getlod.i32(i32, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.getlod.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.getlod.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.exp2.ll b/test/CodeGen/R600/llvm.exp2.ll
index 13bfbab..119d5ef 100644
--- a/test/CodeGen/R600/llvm.exp2.ll
+++ b/test/CodeGen/R600/llvm.exp2.ll
@@ -1,26 +1,79 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
-;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
-;EG-CHECK-LABEL: @test
-;EG-CHECK: EXP_IEEE *
-;CM-CHECK-LABEL: @test
-;CM-CHECK: EXP_IEEE T{{[0-9]+}}.X, -|T{{[0-9]+}}.X|
-;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Y (MASKED), -|T{{[0-9]+}}.X|
-;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Z (MASKED), -|T{{[0-9]+}}.X|
-;CM-CHECK: EXP_IEEE * T{{[0-9]+}}.W (MASKED), -|T{{[0-9]+}}.X|
+;FUNC-LABEL: @test
+;EG-CHECK: EXP_IEEE
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_EXP_F32
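+;
+; (The three MASKED copies reflect Cayman's VLIW layout: lacking a separate
+; transcendental unit, a scalar EXP_IEEE is replicated across all four slots
+; and the unused results are masked. This explanation is an inference from
+; the checks, not from the commit itself.)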
-define void @test(<4 x float> inreg %reg0) #0 {
- %r0 = extractelement <4 x float> %reg0, i32 0
- %r1 = call float @llvm.fabs.f32(float %r0)
- %r2 = fsub float -0.000000e+00, %r1
- %r3 = call float @llvm.exp2.f32(float %r2)
- %vec = insertelement <4 x float> undef, float %r3, i32 0
- call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
+define void @test(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = call float @llvm.exp2.f32(float %in)
+ store float %0, float addrspace(1)* %out
ret void
}
-declare float @llvm.exp2.f32(float) readnone
-declare float @llvm.fabs.f32(float) readnone
-declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+;FUNC-LABEL: @testv2
+;EG-CHECK: EXP_IEEE
+;EG-CHECK: EXP_IEEE
+; FIXME: We should be able to merge these packets together on Cayman so we
+; have a maximum of 4 instructions.
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_EXP_F32
+;SI-CHECK: V_EXP_F32
+
+define void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+entry:
+ %0 = call <2 x float> @llvm.exp2.v2f32(<2 x float> %in)
+ store <2 x float> %0, <2 x float> addrspace(1)* %out
+ ret void
+}
-attributes #0 = { "ShaderType"="0" }
+;FUNC-LABEL: @testv4
+;EG-CHECK: EXP_IEEE
+;EG-CHECK: EXP_IEEE
+;EG-CHECK: EXP_IEEE
+;EG-CHECK: EXP_IEEE
+; FIXME: We should be able to merge these packets together on Cayman so we
+; have a maximum of 4 instructions.
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_EXP_F32
+;SI-CHECK: V_EXP_F32
+;SI-CHECK: V_EXP_F32
+;SI-CHECK: V_EXP_F32
+define void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+entry:
+ %0 = call <4 x float> @llvm.exp2.v4f32(<4 x float> %in)
+ store <4 x float> %0, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.exp2.f32(float) readnone
+declare <2 x float> @llvm.exp2.v2f32(<2 x float>) readnone
+declare <4 x float> @llvm.exp2.v4f32(<4 x float>) readnone
diff --git a/test/CodeGen/R600/llvm.log2.ll b/test/CodeGen/R600/llvm.log2.ll
new file mode 100644
index 0000000..4cba2d4
--- /dev/null
+++ b/test/CodeGen/R600/llvm.log2.ll
@@ -0,0 +1,79 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
+
+;FUNC-LABEL: @test
+;EG-CHECK: LOG_IEEE
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_LOG_F32
+
+define void @test(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = call float @llvm.log2.f32(float %in)
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: @testv2
+;EG-CHECK: LOG_IEEE
+;EG-CHECK: LOG_IEEE
+; FIXME: We should be able to merge these packets together on Cayman so we
+; have a maximum of 4 instructions.
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_LOG_F32
+;SI-CHECK: V_LOG_F32
+
+define void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+entry:
+ %0 = call <2 x float> @llvm.log2.v2f32(<2 x float> %in)
+ store <2 x float> %0, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: @testv4
+;EG-CHECK: LOG_IEEE
+;EG-CHECK: LOG_IEEE
+;EG-CHECK: LOG_IEEE
+;EG-CHECK: LOG_IEEE
+; FIXME: We should be able to merge these packets together on Cayman so we
+; have a maximum of 4 instructions.
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_LOG_F32
+;SI-CHECK: V_LOG_F32
+;SI-CHECK: V_LOG_F32
+;SI-CHECK: V_LOG_F32
+define void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+entry:
+ %0 = call <4 x float> @llvm.log2.v4f32(<4 x float> %in)
+ store <4 x float> %0, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.log2.f32(float) readnone
+declare <2 x float> @llvm.log2.v2f32(<2 x float>) readnone
+declare <4 x float> @llvm.log2.v4f32(<4 x float>) readnone
diff --git a/test/CodeGen/R600/llvm.rint.f64.ll b/test/CodeGen/R600/llvm.rint.f64.ll
index a7a909a..3e2884b 100644
--- a/test/CodeGen/R600/llvm.rint.f64.ll
+++ b/test/CodeGen/R600/llvm.rint.f64.ll
@@ -1,30 +1,38 @@
; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @f64
+; FUNC-LABEL: @rint_f64
; CI: V_RNDNE_F64_e32
-define void @f64(double addrspace(1)* %out, double %in) {
+
+; SI-DAG: V_ADD_F64
+; SI-DAG: V_ADD_F64
+; SI-DAG: V_CMP_GT_F64_e64
+; SI: V_CNDMASK_B32
+; SI: V_CNDMASK_B32
+; SI: S_ENDPGM
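+;
+; (SI has no V_RNDNE_F64, so rint is expanded inline; a plausible scheme
+; consistent with the checks above is the round-to-nearest trick
+; (x + 2^52) - 2^52, with a compare and two 32-bit selects passing through
+; magnitudes already too large to have a fractional part. This reading is an
+; assumption, not part of the commit.)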
+define void @rint_f64(double addrspace(1)* %out, double %in) {
entry:
%0 = call double @llvm.rint.f64(double %in)
store double %0, double addrspace(1)* %out
ret void
}
-; FUNC-LABEL: @v2f64
+; FUNC-LABEL: @rint_v2f64
; CI: V_RNDNE_F64_e32
; CI: V_RNDNE_F64_e32
-define void @v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+define void @rint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
entry:
%0 = call <2 x double> @llvm.rint.v2f64(<2 x double> %in)
store <2 x double> %0, <2 x double> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: @v4f64
+; FUNC-LABEL: @rint_v4f64
; CI: V_RNDNE_F64_e32
; CI: V_RNDNE_F64_e32
; CI: V_RNDNE_F64_e32
; CI: V_RNDNE_F64_e32
-define void @v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+define void @rint_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
entry:
%0 = call <4 x double> @llvm.rint.v4f64(<4 x double> %in)
store <4 x double> %0, <4 x double> addrspace(1)* %out
diff --git a/test/CodeGen/R600/llvm.rint.ll b/test/CodeGen/R600/llvm.rint.ll
index db8352f..209bb43 100644
--- a/test/CodeGen/R600/llvm.rint.ll
+++ b/test/CodeGen/R600/llvm.rint.ll
@@ -1,31 +1,31 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; FUNC-LABEL: @f32
+; FUNC-LABEL: @rint_f32
; R600: RNDNE
; SI: V_RNDNE_F32_e32
-define void @f32(float addrspace(1)* %out, float %in) {
+define void @rint_f32(float addrspace(1)* %out, float %in) {
entry:
- %0 = call float @llvm.rint.f32(float %in)
+ %0 = call float @llvm.rint.f32(float %in) #0
store float %0, float addrspace(1)* %out
ret void
}
-; FUNC-LABEL: @v2f32
+; FUNC-LABEL: @rint_v2f32
; R600: RNDNE
; R600: RNDNE
; SI: V_RNDNE_F32_e32
; SI: V_RNDNE_F32_e32
-define void @v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+define void @rint_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
- %0 = call <2 x float> @llvm.rint.v2f32(<2 x float> %in)
+ %0 = call <2 x float> @llvm.rint.v2f32(<2 x float> %in) #0
store <2 x float> %0, <2 x float> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: @v4f32
+; FUNC-LABEL: @rint_v4f32
; R600: RNDNE
; R600: RNDNE
; R600: RNDNE
@@ -35,15 +35,27 @@ entry:
; SI: V_RNDNE_F32_e32
; SI: V_RNDNE_F32_e32
; SI: V_RNDNE_F32_e32
-define void @v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+define void @rint_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
- %0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %in)
+ %0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %in) #0
store <4 x float> %0, <4 x float> addrspace(1)* %out
ret void
}
+; FUNC-LABEL: @legacy_amdil_round_nearest_f32
+; R600: RNDNE
+
+; SI: V_RNDNE_F32_e32
+define void @legacy_amdil_round_nearest_f32(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = call float @llvm.AMDIL.round.nearest.f32(float %in) #0
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.AMDIL.round.nearest.f32(float) #0
declare float @llvm.rint.f32(float) #0
declare <2 x float> @llvm.rint.v2f32(<2 x float>) #0
declare <4 x float> @llvm.rint.v4f32(<4 x float>) #0
-attributes #0 = { nounwind readonly }
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/R600/load.ll b/test/CodeGen/R600/load.ll
index 1486c4d..a57df5c 100644
--- a/test/CodeGen/R600/load.ll
+++ b/test/CodeGen/R600/load.ll
@@ -696,8 +696,7 @@ entry:
; R600-CHECK: LDS_READ_RET
; R600-CHECK: LDS_READ_RET
; SI-CHECK: S_MOV_B32 m0
-; SI-CHECK: DS_READ_B32
-; SI-CHECK: DS_READ_B32
+; SI-CHECK: DS_READ_B64
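+; (The two adjacent 32-bit local reads are expected to be merged into a
+; single 64-bit read.)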
define void @load_v2f32_local(<2 x float> addrspace(1)* %out, <2 x float> addrspace(3)* %in) {
entry:
%0 = load <2 x float> addrspace(3)* %in
diff --git a/test/CodeGen/R600/local-atomics.ll b/test/CodeGen/R600/local-atomics.ll
new file mode 100644
index 0000000..5a44951
--- /dev/null
+++ b/test/CodeGen/R600/local-atomics.ll
@@ -0,0 +1,254 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
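+; Note on the _offset variants below: the gep advances the pointer by
+; 4 x i32 = 16 bytes, and the checks expect that constant to be folded into
+; the DS instruction's 0x10 immediate offset rather than emitted as a
+; separate add.
+;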
+; FUNC-LABEL: @lds_atomic_xchg_ret_i32:
+; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
+; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
+; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; SI: DS_WRXCHG_RTN_B32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]], 0x0, [M0]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xchg_ret_i32_offset:
+; SI: DS_WRXCHG_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; XXX - Is it really necessary to load 4 into a VGPR?
+; FUNC-LABEL: @lds_atomic_add_ret_i32:
+; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
+; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
+; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; SI: DS_ADD_RTN_U32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]], 0x0, [M0]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_ret_i32_offset:
+; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_ret_i32:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_INC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]], 0x0
+; SI: S_ENDPGM
+define void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_ret_i32_offset:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_INC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]], 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_ret_i32:
+; SI: DS_SUB_RTN_U32
+; SI: S_ENDPGM
+define void @lds_atomic_sub_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_ret_i32_offset:
+; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_ret_i32:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_DEC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]], 0x0
+; SI: S_ENDPGM
+define void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_ret_i32_offset:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_DEC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]], 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_ret_i32:
+; SI: DS_AND_RTN_B32
+; SI: S_ENDPGM
+define void @lds_atomic_and_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_ret_i32_offset:
+; SI: DS_AND_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_ret_i32:
+; SI: DS_OR_RTN_B32
+; SI: S_ENDPGM
+define void @lds_atomic_or_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_ret_i32_offset:
+; SI: DS_OR_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_ret_i32:
+; SI: DS_XOR_RTN_B32
+; SI: S_ENDPGM
+define void @lds_atomic_xor_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_ret_i32_offset:
+; SI: DS_XOR_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
+; XFUNC-LABEL: @lds_atomic_nand_ret_i32:
+; define void @lds_atomic_nand_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+; %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
+; store i32 %result, i32 addrspace(1)* %out, align 4
+; ret void
+; }
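+;
+; A minimal expansion sketch (an assumption, not the committed lowering; block
+; and value names are invented) would be a cmpxchg loop:
+; loop:
+;   %old = phi i32 [ %init, %entry ], [ %loaded, %loop ]
+;   %and = and i32 %old, 4
+;   %nand = xor i32 %and, -1
+;   %pair = cmpxchg i32 addrspace(3)* %ptr, i32 %old, i32 %nand seq_cst seq_cst
+;   %loaded = extractvalue { i32, i1 } %pair, 0
+;   %success = extractvalue { i32, i1 } %pair, 1
+;   br i1 %success, label %exit, label %loop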
+
+; FUNC-LABEL: @lds_atomic_min_ret_i32:
+; SI: DS_MIN_RTN_I32
+; SI: S_ENDPGM
+define void @lds_atomic_min_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_min_ret_i32_offset:
+; SI: DS_MIN_RTN_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_ret_i32:
+; SI: DS_MAX_RTN_I32
+; SI: S_ENDPGM
+define void @lds_atomic_max_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_ret_i32_offset:
+; SI: DS_MAX_RTN_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_ret_i32:
+; SI: DS_MIN_RTN_U32
+; SI: S_ENDPGM
+define void @lds_atomic_umin_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_ret_i32_offset:
+; SI: DS_MIN_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_ret_i32:
+; SI: DS_MAX_RTN_U32
+; SI: S_ENDPGM
+define void @lds_atomic_umax_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_ret_i32_offset:
+; SI: DS_MAX_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/local-atomics64.ll b/test/CodeGen/R600/local-atomics64.ll
new file mode 100644
index 0000000..849b033
--- /dev/null
+++ b/test/CodeGen/R600/local-atomics64.ll
@@ -0,0 +1,251 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
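+; As in local-atomics.ll, the _offset variants expect the gep offset
+; (4 x i64 = 32 bytes) to be folded into the DS instructions' 0x20 immediate.
+;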
+; FUNC-LABEL: @lds_atomic_xchg_ret_i64:
+; SI: DS_WRXCHG_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xchg_ret_i64_offset:
+; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_ret_i64:
+; SI: DS_ADD_RTN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_ret_i64_offset:
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI: DS_ADD_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}, 0x20, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i64 4
+ %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_ret_i64:
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI: DS_INC_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_ret_i64_offset:
+; SI: DS_INC_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_ret_i64:
+; SI: DS_SUB_RTN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_ret_i64_offset:
+; SI: DS_SUB_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_ret_i64:
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI: DS_DEC_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_ret_i64_offset:
+; SI: DS_DEC_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_ret_i64:
+; SI: DS_AND_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_ret_i64_offset:
+; SI: DS_AND_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_ret_i64:
+; SI: DS_OR_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_ret_i64_offset:
+; SI: DS_OR_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_ret_i64:
+; SI: DS_XOR_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_ret_i64_offset:
+; SI: DS_XOR_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
+; XFUNC-LABEL: @lds_atomic_nand_ret_i64:
+; define void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+; %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
+; store i64 %result, i64 addrspace(1)* %out, align 8
+; ret void
+; }
+
+; FUNC-LABEL: @lds_atomic_min_ret_i64:
+; SI: DS_MIN_RTN_I64
+; SI: S_ENDPGM
+define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_min_ret_i64_offset:
+; SI: DS_MIN_RTN_I64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_ret_i64:
+; SI: DS_MAX_RTN_I64
+; SI: S_ENDPGM
+define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_ret_i64_offset:
+; SI: DS_MAX_RTN_I64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_ret_i64:
+; SI: DS_MIN_RTN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_ret_i64_offset:
+; SI: DS_MIN_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_ret_i64:
+; SI: DS_MAX_RTN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_ret_i64_offset:
+; SI: DS_MAX_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/local-memory-two-objects.ll b/test/CodeGen/R600/local-memory-two-objects.ll
index 1e42285..e29e4cc 100644
--- a/test/CodeGen/R600/local-memory-two-objects.ll
+++ b/test/CodeGen/R600/local-memory-two-objects.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-@local_memory_two_objects.local_mem0 = internal addrspace(3) unnamed_addr global [4 x i32] zeroinitializer, align 4
-@local_memory_two_objects.local_mem1 = internal addrspace(3) unnamed_addr global [4 x i32] zeroinitializer, align 4
+@local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] zeroinitializer, align 4
+@local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] zeroinitializer, align 4
; EG-CHECK: @local_memory_two_objects
diff --git a/test/CodeGen/R600/local-memory.ll b/test/CodeGen/R600/local-memory.ll
index 6ebe41d..51af484 100644
--- a/test/CodeGen/R600/local-memory.ll
+++ b/test/CodeGen/R600/local-memory.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
; RUN: llc < %s -march=r600 -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=CI-CHECK %s
-@local_memory.local_mem = internal addrspace(3) unnamed_addr global [128 x i32] zeroinitializer, align 4
+@local_memory.local_mem = internal unnamed_addr addrspace(3) global [128 x i32] zeroinitializer, align 4
; EG-CHECK-LABEL: @local_memory
; SI-CHECK-LABEL: @local_memory
diff --git a/test/CodeGen/R600/mul.ll b/test/CodeGen/R600/mul.ll
index 6ed754c..d231e92 100644
--- a/test/CodeGen/R600/mul.ll
+++ b/test/CodeGen/R600/mul.ll
@@ -1,14 +1,14 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s -check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; mul24 and mad24 are affected
-;FUNC-LABEL: @test2
-;EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: @test2
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -19,16 +19,16 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;FUNC-LABEL: @test4
-;EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: @test4
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -39,11 +39,11 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
ret void
}
-; SI-CHECK-LABEL: @trunc_i64_mul_to_i32
-; SI-CHECK: S_LOAD_DWORD
-; SI-CHECK: S_LOAD_DWORD
-; SI-CHECK: V_MUL_LO_I32
-; SI-CHECK: BUFFER_STORE_DWORD
+; FUNC-LABEL: @trunc_i64_mul_to_i32
+; SI: S_LOAD_DWORD
+; SI: S_LOAD_DWORD
+; SI: V_MUL_LO_I32
+; SI: BUFFER_STORE_DWORD
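+; (Only the low 32 bits of the product are stored, and the low half of an
+; i64 multiply depends only on the low halves of the operands, so a single
+; 32-bit multiply suffices.)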
define void @trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
%mul = mul i64 %b, %a
%trunc = trunc i64 %mul to i32
diff --git a/test/CodeGen/R600/no-initializer-constant-addrspace.ll b/test/CodeGen/R600/no-initializer-constant-addrspace.ll
new file mode 100644
index 0000000..ab82e7e
--- /dev/null
+++ b/test/CodeGen/R600/no-initializer-constant-addrspace.ll
@@ -0,0 +1,20 @@
+; RUN: llc -march=r600 -mcpu=SI -o /dev/null %s
+; RUN: llc -march=r600 -mcpu=cypress -o /dev/null %s
+
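+; Note: these RUN lines only verify that codegen does not assert on constant
+; address space globals without a usable initializer (external or undef); no
+; FileCheck is piped in, so the FUNC-LABEL lines below are informational only.
+;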
+@extern_const_addrspace = external unnamed_addr addrspace(2) constant [5 x i32], align 4
+
+; FUNC-LABEL: @load_extern_const_init
+define void @load_extern_const_init(i32 addrspace(1)* %out) nounwind {
+ %val = load i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @extern_const_addrspace, i64 0, i64 3), align 4
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+@undef_const_addrspace = unnamed_addr addrspace(2) constant [5 x i32] undef, align 4
+
+; FUNC-LABEL: @load_undef_const_init
+define void @load_undef_const_init(i32 addrspace(1)* %out) nounwind {
+ %val = load i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @undef_const_addrspace, i64 0, i64 3), align 4
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
index 9878366..91a70b7 100644
--- a/test/CodeGen/R600/or.ll
+++ b/test/CodeGen/R600/or.ll
@@ -116,9 +116,9 @@ define void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64
}
; SI-LABEL: @trunc_i64_or_to_i32
-; SI: S_LOAD_DWORD [[SREG0:s[0-9]+]],
-; SI: S_LOAD_DWORD [[SREG1:s[0-9]+]],
-; SI: S_OR_B32 [[SRESULT:s[0-9]+]], [[SREG1]], [[SREG0]]
+; SI: S_LOAD_DWORDX2 s{{\[}}[[SREG0:[0-9]+]]
+; SI: S_LOAD_DWORDX2 s{{\[}}[[SREG1:[0-9]+]]
+; SI: S_OR_B32 [[SRESULT:s[0-9]+]], s[[SREG1]], s[[SREG0]]
; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; SI: BUFFER_STORE_DWORD [[VRESULT]],
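+; (The truncated result only needs the low halves, so a single S_OR_B32 on
+; the low dwords of the two 64-bit loads suffices.)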
define void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
diff --git a/test/CodeGen/R600/parallelandifcollapse.ll b/test/CodeGen/R600/parallelandifcollapse.ll
index 4afaf68..8a269e0 100644
--- a/test/CodeGen/R600/parallelandifcollapse.ll
+++ b/test/CodeGen/R600/parallelandifcollapse.ll
@@ -7,6 +7,12 @@
; CHECK: AND_INT
; CHECK-NEXT: AND_INT
; CHECK-NEXT: OR_INT
+
+; FIXME: For some reason, having the allocas here allowed the flatten-cfg pass
+; to do its transformation; however, now that we are using local memory for
+; allocas, the transformation isn't happening.
+; XFAIL: *
+
define void @_Z9chk1D_512v() #0 {
entry:
%a0 = alloca i32, align 4
diff --git a/test/CodeGen/R600/parallelorifcollapse.ll b/test/CodeGen/R600/parallelorifcollapse.ll
index b0db7cd..feca688 100644
--- a/test/CodeGen/R600/parallelorifcollapse.ll
+++ b/test/CodeGen/R600/parallelorifcollapse.ll
@@ -3,6 +3,11 @@
;
; CFG flattening should use parallel-or to generate branch conditions and
; then merge if-regions with the same bodies.
+
+; FIXME: For some reason, having the allocas here allowed the flatten-cfg pass
+; to do its transformation; however, now that we are using local memory for
+; allocas, the transformation isn't happening.
+; XFAIL: *
;
; CHECK: OR_INT
; CHECK-NEXT: OR_INT
diff --git a/test/CodeGen/R600/private-memory-atomics.ll b/test/CodeGen/R600/private-memory-atomics.ll
new file mode 100644
index 0000000..def4f9d
--- /dev/null
+++ b/test/CodeGen/R600/private-memory-atomics.ll
@@ -0,0 +1,31 @@
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s
+
+; This works because the promote-alloca pass replaces these with LDS atomics.
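+; (Roughly: the private array becomes an LDS object and the atomicrmw becomes
+; an addrspace(3) atomic, which the DS instructions support. This is a sketch
+; of the idea, not the exact transformation.)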
+
+; Private atomics have no real use, but we at least shouldn't crash on them.
+define void @atomicrmw_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+entry:
+ %tmp = alloca [2 x i32]
+ %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ store i32 0, i32* %tmp1
+ store i32 1, i32* %tmp2
+ %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %tmp4 = atomicrmw add i32* %tmp3, i32 7 acq_rel
+ store i32 %tmp4, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @cmpxchg_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+entry:
+ %tmp = alloca [2 x i32]
+ %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ store i32 0, i32* %tmp1
+ store i32 1, i32* %tmp2
+ %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %tmp4 = cmpxchg i32* %tmp3, i32 0, i32 1 acq_rel monotonic
+ %val = extractvalue { i32, i1 } %tmp4, 0
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/private-memory-broken.ll b/test/CodeGen/R600/private-memory-broken.ll
new file mode 100644
index 0000000..4086085
--- /dev/null
+++ b/test/CodeGen/R600/private-memory-broken.ll
@@ -0,0 +1,20 @@
+; RUN: not llc -verify-machineinstrs -march=r600 -mcpu=SI %s -o /dev/null 2>&1 | FileCheck %s
+
+; Make sure the promote-alloca pass doesn't crash on a call it cannot handle.
+
+; CHECK: unsupported call
+
+declare i32 @foo(i32*) nounwind
+
+define void @call_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+entry:
+ %tmp = alloca [2 x i32]
+ %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ store i32 0, i32* %tmp1
+ store i32 1, i32* %tmp2
+ %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %val = call i32 @foo(i32* %tmp3) nounwind
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/private-memory.ll b/test/CodeGen/R600/private-memory.ll
index d3453f2..89122be 100644
--- a/test/CodeGen/R600/private-memory.ll
+++ b/test/CodeGen/R600/private-memory.ll
@@ -1,24 +1,19 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=R600-CHECK --check-prefix=FUNC
; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
-; This test checks that uses and defs of the AR register happen in the same
-; instruction clause.
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; FUNC-LABEL: @mova_same_clause
-; R600-CHECK: MOVA_INT
-; R600-CHECK-NOT: ALU clause
-; R600-CHECK: 0 + AR.x
-; R600-CHECK: MOVA_INT
-; R600-CHECK-NOT: ALU clause
-; R600-CHECK: 0 + AR.x
-
-; SI-CHECK: V_READFIRSTLANE_B32 vcc_lo
-; SI-CHECK: V_MOVRELD
-; SI-CHECK: S_CBRANCH
-; SI-CHECK: V_READFIRSTLANE_B32 vcc_lo
-; SI-CHECK: V_MOVRELD
-; SI-CHECK: S_CBRANCH
+; R600-CHECK: LDS_WRITE
+; R600-CHECK: LDS_WRITE
+; R600-CHECK: LDS_READ
+; R600-CHECK: LDS_READ
+
+; SI-CHECK: DS_WRITE_B32
+; SI-CHECK: DS_WRITE_B32
+; SI-CHECK: DS_READ_B32
+; SI-CHECK: DS_READ_B32
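+;
+; (With allocas now promoted to LDS, the indexed private accesses turn into
+; LDS reads and writes instead of MOVA-based register indexing.)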
define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
%stack = alloca [5 x i32], align 4
@@ -114,12 +109,8 @@ for.end:
; FUNC-LABEL: @short_array
-; R600-CHECK: MOV {{\** *}}T{{[0-9]\.[XYZW]}}, literal
-; R600-CHECK: 65536
-; R600-CHECK: *
; R600-CHECK: MOVA_INT
-; SI-CHECK: V_MOV_B32_e32 v{{[0-9]}}, 0x10000
; SI-CHECK: V_MOVRELS_B32_e32
define void @short_array(i32 addrspace(1)* %out, i32 %index) {
entry:
@@ -137,10 +128,7 @@ entry:
; FUNC-LABEL: @char_array
-; R600-CHECK: OR_INT {{\** *}}T{{[0-9]\.[XYZW]}}, {{[PVT0-9]+\.[XYZW]}}, literal
-; R600-CHECK: 256
-; R600-CHECK: *
-; R600-CHECK-NEXT: MOVA_INT
+; R600-CHECK: MOVA_INT
; SI-CHECK: V_OR_B32_e32 v{{[0-9]}}, 0x100
; SI-CHECK: V_MOVRELS_B32_e32
@@ -185,7 +173,9 @@ entry:
; Test that two stack objects are not stored in the same register
; The second stack object should be in T3.X
; FUNC-LABEL: @no_overlap
-; R600-CHECK: MOV {{\** *}}T3.X
+; R600-CHECK: MOV
+; R600-CHECK: [[CHAN:[XYZW]]]+
+; R600-CHECK-NOT: [[CHAN]]+
; SI-CHECK: V_MOV_B32_e32 v3
define void @no_overlap(i32 addrspace(1)* %out, i32 %in) {
entry:
@@ -211,6 +201,85 @@ entry:
ret void
}
+define void @char_array_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x [2 x i8]]
+ %gep0 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 1
+ store i8 0, i8* %gep0
+ store i8 1, i8* %gep1
+ %gep2 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 %index
+ %load = load i8* %gep2
+ %sext = sext i8 %load to i32
+ store i32 %sext, i32 addrspace(1)* %out
+ ret void
+}
+define void @i32_array_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x [2 x i32]]
+ %gep0 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 1
+ store i32 0, i32* %gep0
+ store i32 1, i32* %gep1
+ %gep2 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
+ %load = load i32* %gep2
+ store i32 %load, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @i64_array_array(i64 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x [2 x i64]]
+ %gep0 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 1
+ store i64 0, i64* %gep0
+ store i64 1, i64* %gep1
+ %gep2 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 %index
+ %load = load i64* %gep2
+ store i64 %load, i64 addrspace(1)* %out
+ ret void
+}
+
+%struct.pair32 = type { i32, i32 }
+
+define void @struct_array_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x [2 x %struct.pair32]]
+ %gep0 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
+ %gep1 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 1, i32 1
+ store i32 0, i32* %gep0
+ store i32 1, i32* %gep1
+ %gep2 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 %index, i32 0
+ %load = load i32* %gep2
+ store i32 %load, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x %struct.pair32]
+ %gep0 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
+ %gep1 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 1, i32 0
+ store i32 0, i32* %gep0
+ store i32 1, i32* %gep1
+ %gep2 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 %index, i32 0
+ %load = load i32* %gep2
+ store i32 %load, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+entry:
+ %tmp = alloca [2 x i32]
+ %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ store i32 0, i32* %tmp1
+ store i32 1, i32* %tmp2
+ %cmp = icmp eq i32 %in, 0
+ %sel = select i1 %cmp, i32* %tmp1, i32* %tmp2
+ %load = load i32* %sel
+ store i32 %load, i32 addrspace(1)* %out
+ ret void
+}
-declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/R600/pv.ll b/test/CodeGen/R600/pv.ll
index f322bc7..55eb56d 100644
--- a/test/CodeGen/R600/pv.ll
+++ b/test/CodeGen/R600/pv.ll
@@ -103,7 +103,7 @@ main_body:
%95 = insertelement <4 x float> %94, float 0.000000e+00, i32 3
%96 = call float @llvm.AMDGPU.dp4(<4 x float> %91, <4 x float> %95)
%97 = call float @fabs(float %96)
- %98 = call float @llvm.AMDGPU.rsq(float %97)
+ %98 = call float @llvm.AMDGPU.rsq.f32(float %97)
%99 = fmul float %4, %98
%100 = fmul float %5, %98
%101 = fmul float %6, %98
@@ -225,7 +225,7 @@ declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) #1
declare float @fabs(float) #2
; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #1
+declare float @llvm.AMDGPU.rsq.f32(float) #1
; Function Attrs: readnone
declare float @llvm.AMDIL.clamp.(float, float, float) #1
diff --git a/test/CodeGen/R600/reorder-stores.ll b/test/CodeGen/R600/reorder-stores.ll
new file mode 100644
index 0000000..be2fcc6
--- /dev/null
+++ b/test/CodeGen/R600/reorder-stores.ll
@@ -0,0 +1,104 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
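+; These tests swap the contents of two memory objects that may alias, so both
+; loads must be scheduled before either store; the check lines encode that
+; ordering.
+;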
+; SI-LABEL: @no_reorder_v2f64_global_load_store
+; SI: BUFFER_LOAD_DWORDX2
+; SI: BUFFER_LOAD_DWORDX2
+; SI: BUFFER_LOAD_DWORDX2
+; SI: BUFFER_LOAD_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
+ %tmp1 = load <2 x double> addrspace(1)* %x, align 16
+ %tmp4 = load <2 x double> addrspace(1)* %y, align 16
+ store <2 x double> %tmp4, <2 x double> addrspace(1)* %x, align 16
+ store <2 x double> %tmp1, <2 x double> addrspace(1)* %y, align 16
+ ret void
+}
+
+; SI-LABEL: @no_reorder_scalarized_v2f64_local_load_store
+; SI: DS_READ_B64
+; SI: DS_READ_B64
+; SI: DS_WRITE_B64
+; SI: DS_WRITE_B64
+; SI: S_ENDPGM
+define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
+ %tmp1 = load <2 x double> addrspace(3)* %x, align 16
+ %tmp4 = load <2 x double> addrspace(3)* %y, align 16
+ store <2 x double> %tmp4, <2 x double> addrspace(3)* %x, align 16
+ store <2 x double> %tmp1, <2 x double> addrspace(3)* %y, align 16
+ ret void
+}
+
+; SI-LABEL: @no_reorder_split_v8i32_global_load_store
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+
+
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: S_ENDPGM
+define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
+ %tmp1 = load <8 x i32> addrspace(1)* %x, align 32
+ %tmp4 = load <8 x i32> addrspace(1)* %y, align 32
+ store <8 x i32> %tmp4, <8 x i32> addrspace(1)* %x, align 32
+ store <8 x i32> %tmp1, <8 x i32> addrspace(1)* %y, align 32
+ ret void
+}
+
+; SI-LABEL: @no_reorder_extload_64
+; SI: DS_READ_B64
+; SI: DS_READ_B64
+; SI: DS_WRITE_B64
+; SI-NOT: DS_READ
+; SI: DS_WRITE_B64
+; SI: S_ENDPGM
+define void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
+ %tmp1 = load <2 x i32> addrspace(3)* %x, align 8
+ %tmp4 = load <2 x i32> addrspace(3)* %y, align 8
+ %tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
+ %tmp4ext = zext <2 x i32> %tmp4 to <2 x i64>
+ %tmp7 = add <2 x i64> %tmp1ext, <i64 1, i64 1>
+ %tmp9 = add <2 x i64> %tmp4ext, <i64 1, i64 1>
+ %trunctmp9 = trunc <2 x i64> %tmp9 to <2 x i32>
+ %trunctmp7 = trunc <2 x i64> %tmp7 to <2 x i32>
+ store <2 x i32> %trunctmp9, <2 x i32> addrspace(3)* %x, align 8
+ store <2 x i32> %trunctmp7, <2 x i32> addrspace(3)* %y, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/rotl.i64.ll b/test/CodeGen/R600/rotl.i64.ll
new file mode 100644
index 0000000..bda0b66
--- /dev/null
+++ b/test/CodeGen/R600/rotl.i64.ll
@@ -0,0 +1,34 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
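+; rotl(x, y) = (x << y) | (x >> (64 - y)). There is no 64-bit alignbit
+; instruction to match this to, so the checks expect the explicit
+; shift/sub/shift/or sequence to survive.
+;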
+; FUNC-LABEL: @s_rotl_i64:
+; SI: S_LSHL_B64
+; SI: S_SUB_I32
+; SI: S_LSHR_B64
+; SI: S_OR_B64
+define void @s_rotl_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
+entry:
+ %0 = shl i64 %x, %y
+ %1 = sub i64 64, %y
+ %2 = lshr i64 %x, %1
+ %3 = or i64 %0, %2
+ store i64 %3, i64 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @v_rotl_i64:
+; SI: V_LSHL_B64
+; SI: V_SUB_I32
+; SI: V_LSHR_B64
+; SI: V_OR_B32
+; SI: V_OR_B32
+define void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
+entry:
+ %x = load i64 addrspace(1)* %xptr, align 8
+ %y = load i64 addrspace(1)* %yptr, align 8
+ %tmp0 = shl i64 %x, %y
+ %tmp1 = sub i64 64, %y
+ %tmp2 = lshr i64 %x, %tmp1
+ %tmp3 = or i64 %tmp0, %tmp2
+ store i64 %tmp3, i64 addrspace(1)* %in, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/rotl.ll b/test/CodeGen/R600/rotl.ll
new file mode 100644
index 0000000..83f657f
--- /dev/null
+++ b/test/CodeGen/R600/rotl.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
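+; These functions spell out rotl(x, y) = (x << y) | (x >> (32 - y)); the
+; expectation is that the backend matches the pattern to a single bit-align
+; op per element: BIT_ALIGN_INT on R600, V_ALIGNBIT_B32 on SI (plus the
+; S_SUB_I32 to form 32 - y).
+;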
+; FUNC-LABEL: @rotl_i32:
+; R600: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
+; R600-NEXT: 32
+; R600: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
+
+; SI: S_SUB_I32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
+; SI: V_MOV_B32_e32 [[VDST:v[0-9]+]], [[SDST]]
+; SI: V_ALIGNBIT_B32 {{v[0-9]+, [s][0-9]+, v[0-9]+}}, [[VDST]]
+define void @rotl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+entry:
+ %0 = shl i32 %x, %y
+ %1 = sub i32 32, %y
+ %2 = lshr i32 %x, %1
+ %3 = or i32 %0, %2
+ store i32 %3, i32 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @rotl_v2i32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+define void @rotl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
+entry:
+ %0 = shl <2 x i32> %x, %y
+ %1 = sub <2 x i32> <i32 32, i32 32>, %y
+ %2 = lshr <2 x i32> %x, %1
+ %3 = or <2 x i32> %0, %2
+ store <2 x i32> %3, <2 x i32> addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @rotl_v4i32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+define void @rotl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
+entry:
+ %0 = shl <4 x i32> %x, %y
+ %1 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
+ %2 = lshr <4 x i32> %x, %1
+ %3 = or <4 x i32> %0, %2
+ store <4 x i32> %3, <4 x i32> addrspace(1)* %in
+ ret void
+}
diff --git a/test/CodeGen/R600/rotr.i64.ll b/test/CodeGen/R600/rotr.i64.ll
new file mode 100644
index 0000000..c264751
--- /dev/null
+++ b/test/CodeGen/R600/rotr.i64.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @s_rotr_i64
+; SI: S_LSHR_B64
+; SI: S_SUB_I32
+; SI: S_LSHL_B64
+; SI: S_OR_B64
+define void @s_rotr_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
+entry:
+ %tmp0 = sub i64 64, %y
+ %tmp1 = shl i64 %x, %tmp0
+ %tmp2 = lshr i64 %x, %y
+ %tmp3 = or i64 %tmp1, %tmp2
+ store i64 %tmp3, i64 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @v_rotr_i64
+; SI: V_LSHR_B64
+; SI: V_SUB_I32
+; SI: V_LSHL_B64
+; SI: V_OR_B32
+; SI: V_OR_B32
+define void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
+entry:
+ %x = load i64 addrspace(1)* %xptr, align 8
+ %y = load i64 addrspace(1)* %yptr, align 8
+ %tmp0 = sub i64 64, %y
+ %tmp1 = shl i64 %x, %tmp0
+ %tmp2 = lshr i64 %x, %y
+ %tmp3 = or i64 %tmp1, %tmp2
+ store i64 %tmp3, i64 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @s_rotr_v2i64
+define void @s_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> %x, <2 x i64> %y) {
+entry:
+ %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
+ %tmp1 = shl <2 x i64> %x, %tmp0
+ %tmp2 = lshr <2 x i64> %x, %y
+ %tmp3 = or <2 x i64> %tmp1, %tmp2
+ store <2 x i64> %tmp3, <2 x i64> addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @v_rotr_v2i64
+define void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
+entry:
+ %x = load <2 x i64> addrspace(1)* %xptr, align 8
+ %y = load <2 x i64> addrspace(1)* %yptr, align 8
+ %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
+ %tmp1 = shl <2 x i64> %x, %tmp0
+ %tmp2 = lshr <2 x i64> %x, %y
+ %tmp3 = or <2 x i64> %tmp1, %tmp2
+ store <2 x i64> %tmp3, <2 x i64> addrspace(1)* %in
+ ret void
+}
diff --git a/test/CodeGen/R600/rotr.ll b/test/CodeGen/R600/rotr.ll
index edf7aee..a5a4da4 100644
--- a/test/CodeGen/R600/rotr.ll
+++ b/test/CodeGen/R600/rotr.ll
@@ -1,37 +1,52 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; R600-CHECK-LABEL: @rotr:
-; R600-CHECK: BIT_ALIGN_INT
+; FUNC-LABEL: @rotr_i32:
+; R600: BIT_ALIGN_INT
-; SI-CHECK-LABEL: @rotr:
-; SI-CHECK: V_ALIGNBIT_B32
-define void @rotr(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+; SI: V_ALIGNBIT_B32
+define void @rotr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
entry:
- %0 = sub i32 32, %y
- %1 = shl i32 %x, %0
- %2 = lshr i32 %x, %y
- %3 = or i32 %1, %2
- store i32 %3, i32 addrspace(1)* %in
+ %tmp0 = sub i32 32, %y
+ %tmp1 = shl i32 %x, %tmp0
+ %tmp2 = lshr i32 %x, %y
+ %tmp3 = or i32 %tmp1, %tmp2
+ store i32 %tmp3, i32 addrspace(1)* %in
ret void
}
-; R600-CHECK-LABEL: @rotl:
-; R600-CHECK: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
-; R600-CHECK-NEXT: 32
-; R600-CHECK: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
+; FUNC-LABEL: @rotr_v2i32:
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+define void @rotr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
+entry:
+ %tmp0 = sub <2 x i32> <i32 32, i32 32>, %y
+ %tmp1 = shl <2 x i32> %x, %tmp0
+ %tmp2 = lshr <2 x i32> %x, %y
+ %tmp3 = or <2 x i32> %tmp1, %tmp2
+ store <2 x i32> %tmp3, <2 x i32> addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @rotr_v4i32:
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
-; SI-CHECK-LABEL: @rotl:
-; SI-CHECK: S_SUB_I32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
-; SI-CHECK: V_MOV_B32_e32 [[VDST:v[0-9]+]], [[SDST]]
-; SI-CHECK: V_ALIGNBIT_B32 {{v[0-9]+, [s][0-9]+, v[0-9]+}}, [[VDST]]
-define void @rotl(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+define void @rotr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
entry:
- %0 = shl i32 %x, %y
- %1 = sub i32 32, %y
- %2 = lshr i32 %x, %1
- %3 = or i32 %0, %2
- store i32 %3, i32 addrspace(1)* %in
+ %tmp0 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
+ %tmp1 = shl <4 x i32> %x, %tmp0
+ %tmp2 = lshr <4 x i32> %x, %y
+ %tmp3 = or <4 x i32> %tmp1, %tmp2
+ store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %in
ret void
}
diff --git a/test/CodeGen/R600/rsq.ll b/test/CodeGen/R600/rsq.ll
new file mode 100644
index 0000000..87c0570
--- /dev/null
+++ b/test/CodeGen/R600/rsq.ll
@@ -0,0 +1,26 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.sqrt.f32(float) nounwind readnone
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+; SI-LABEL: @rsq_f32
+; SI: V_RSQ_F32_e32
+; SI: S_ENDPGM
+define void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %val = load float addrspace(1)* %in, align 4
+ %sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv float 1.0, %sqrt
+ store float %div, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @rsq_f64
+; SI: V_RSQ_F64_e32
+; SI: S_ENDPGM
+define void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+ %val = load double addrspace(1)* %in, align 4
+ %sqrt = call double @llvm.sqrt.f64(double %val) nounwind readnone
+ %div = fdiv double 1.0, %sqrt
+ store double %div, double addrspace(1)* %out, align 4
+ ret void
+}
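
A note on what these two tests match: a reciprocal square root spelled as an fdiv of 1.0 by sqrt, which the backend is expected to collapse into the single approximate V_RSQ instruction rather than a sqrt followed by a full divide. The scalar semantics being folded, as a trivial C++ sketch:

#include <cmath>

// The IR pattern above is exactly this expression; the checks assert it
// becomes one V_RSQ_F32_e32 / V_RSQ_F64_e32 instead of sqrt + div.
float  rsq_f32(float x)  { return 1.0f / std::sqrt(x); }
double rsq_f64(double x) { return 1.0  / std::sqrt(x); }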
diff --git a/test/CodeGen/R600/saddo.ll b/test/CodeGen/R600/saddo.ll
new file mode 100644
index 0000000..c80480e
--- /dev/null
+++ b/test/CodeGen/R600/saddo.ll
@@ -0,0 +1,62 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s
+
+declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+
+; FUNC-LABEL: @saddo_i64_zext
+define void @saddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %sadd, 0
+ %carry = extractvalue { i64, i1 } %sadd, 1
+ %ext = zext i1 %carry to i64
+ %add2 = add i64 %val, %ext
+ store i64 %add2, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_saddo_i32
+define void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+ %sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %sadd, 0
+ %carry = extractvalue { i32, i1 } %sadd, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_saddo_i32
+define void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %sadd, 0
+ %carry = extractvalue { i32, i1 } %sadd, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @s_saddo_i64
+define void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+ %sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %sadd, 0
+ %carry = extractvalue { i64, i1 } %sadd, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_saddo_i64
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
+define void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %a = load i64 addrspace(1)* %aptr, align 4
+ %b = load i64 addrspace(1)* %bptr, align 4
+ %sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %sadd, 0
+ %carry = extractvalue { i64, i1 } %sadd, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
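
The llvm.sadd.with.overflow.* intrinsics above return the wrapped sum plus an overflow flag. A small self-contained C++ model of that flag (a sketch of the two's-complement rule, not the backend lowering): signed addition overflows exactly when both operands share a sign and the wrapped result's sign differs.

#include <cstdint>

static bool saddo_i32(int32_t a, int32_t b, int32_t &sum) {
  sum = int32_t(uint32_t(a) + uint32_t(b));  // wrapping add
  // (sum ^ a) and (sum ^ b) both have the sign bit set only when the
  // result's sign differs from both inputs' -- the overflow case.
  return ((sum ^ a) & (sum ^ b)) < 0;
}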
diff --git a/test/CodeGen/R600/scalar_to_vector.ll b/test/CodeGen/R600/scalar_to_vector.ll
new file mode 100644
index 0000000..bcccb06
--- /dev/null
+++ b/test/CodeGen/R600/scalar_to_vector.ll
@@ -0,0 +1,80 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+
+; FUNC-LABEL: @scalar_to_vector_v2i32
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_LSHRREV_B32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: S_ENDPGM
+define void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %tmp1 = load i32 addrspace(1)* %in, align 4
+ %bc = bitcast i32 %tmp1 to <2 x i16>
+ %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i16> %tmp2, <4 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @scalar_to_vector_v2f32
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_LSHRREV_B32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: S_ENDPGM
+define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %tmp1 = load float addrspace(1)* %in, align 4
+ %bc = bitcast float %tmp1 to <2 x i16>
+ %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i16> %tmp2, <4 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; Getting a SCALAR_TO_VECTOR seems to be tricky. These cases managed
+; to produce one, but for some reason never made it to selection.
+
+
+; define void @scalar_to_vector_test2(<8 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+; %tmp1 = load i32 addrspace(1)* %in, align 4
+; %bc = bitcast i32 %tmp1 to <4 x i8>
+
+; %tmp2 = shufflevector <4 x i8> %bc, <4 x i8> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+; store <8 x i8> %tmp2, <8 x i8> addrspace(1)* %out, align 4
+; ret void
+; }
+
+; define void @scalar_to_vector_test3(<4 x i32> addrspace(1)* %out) nounwind {
+; %newvec0 = insertelement <2 x i64> undef, i64 12345, i32 0
+; %newvec1 = insertelement <2 x i64> %newvec0, i64 undef, i32 1
+; %bc = bitcast <2 x i64> %newvec1 to <4 x i32>
+; %add = add <4 x i32> %bc, <i32 1, i32 2, i32 3, i32 4>
+; store <4 x i32> %add, <4 x i32> addrspace(1)* %out, align 16
+; ret void
+; }
+
+; define void @scalar_to_vector_test4(<8 x i16> addrspace(1)* %out) nounwind {
+; %newvec0 = insertelement <4 x i32> undef, i32 12345, i32 0
+; %bc = bitcast <4 x i32> %newvec0 to <8 x i16>
+; %add = add <8 x i16> %bc, <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>
+; store <8 x i16> %add, <8 x i16> addrspace(1)* %out, align 16
+; ret void
+; }
+
+; define void @scalar_to_vector_test5(<4 x i16> addrspace(1)* %out) nounwind {
+; %newvec0 = insertelement <2 x i32> undef, i32 12345, i32 0
+; %bc = bitcast <2 x i32> %newvec0 to <4 x i16>
+; %add = add <4 x i16> %bc, <i16 1, i16 2, i16 3, i16 4>
+; store <4 x i16> %add, <4 x i16> addrspace(1)* %out, align 16
+; ret void
+; }
+
+; define void @scalar_to_vector_test6(<4 x i16> addrspace(1)* %out) nounwind {
+; %newvec0 = insertelement <2 x i32> undef, i32 12345, i32 0
+; %bc = bitcast <2 x i32> %newvec0 to <4 x i16>
+; %add = add <4 x i16> %bc, <i16 1, i16 2, i16 3, i16 4>
+; store <4 x i16> %add, <4 x i16> addrspace(1)* %out, align 16
+; ret void
+; }
diff --git a/test/CodeGen/R600/sdiv.ll b/test/CodeGen/R600/sdiv.ll
index 3dd10c8..e922d5c 100644
--- a/test/CodeGen/R600/sdiv.ll
+++ b/test/CodeGen/R600/sdiv.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; The code generated by sdiv is long and complex and may frequently change.
; The goal of this test is to make sure the ISel doesn't fail.
@@ -9,9 +10,9 @@
; This was fixed by adding an additional pattern in R600Instructions.td to
; match this pattern with a CNDGE_INT.
-; CHECK: CF_END
-
-define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+; FUNC-LABEL: @sdiv_i32
+; EG: CF_END
+define void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in
%den = load i32 addrspace(1) * %den_ptr
@@ -19,3 +20,84 @@ define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
store i32 %result, i32 addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @sdiv_i32_4
+define void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %num = load i32 addrspace(1) * %in
+ %result = sdiv i32 %num, 4
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; Multiply by a weird constant to make sure setIntDivIsCheap is
+; working.
+
+; FUNC-LABEL: @slow_sdiv_i32_3435
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_MOV_B32_e32 [[MAGIC:v[0-9]+]], 0x98a1930b
+; SI: V_MUL_HI_I32 [[TMP:v[0-9]+]], [[VAL]], [[MAGIC]]
+; SI: V_ADD_I32
+; SI: V_LSHRREV_B32
+; SI: V_ASHRREV_I32
+; SI: V_ADD_I32
+; SI: BUFFER_STORE_DWORD
+; SI: S_ENDPGM
+define void @slow_sdiv_i32_3435(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %num = load i32 addrspace(1) * %in
+ %result = sdiv i32 %num, 3435
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
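The checks above describe the standard strength reduction of sdiv-by-constant into a multiply-high by a precomputed magic multiplier plus shift and sign fix-ups; the 0x98a1930b literal is taken directly from the V_MOV_B32 check. A reference C++ sketch of how such a multiplier is derived (following the well-known Hacker's Delight routine) and used in the instruction shape the checks expect; the shift amount is computed rather than assumed, since the checks do not pin it down:

#include <cstdint>
#include <cstdlib>

struct Magic { int32_t M; int s; };

// Hacker's Delight-style magic-number derivation for signed division by a
// constant d, |d| >= 2.
static Magic magic(int32_t d) {
  const uint32_t two31 = 0x80000000u;
  uint32_t ad = uint32_t(std::abs(d));
  uint32_t t = two31 + (uint32_t(d) >> 31);
  uint32_t anc = t - 1 - t % ad;                    // |nc|
  int p = 31;
  uint32_t q1 = two31 / anc, r1 = two31 - q1 * anc;
  uint32_t q2 = two31 / ad,  r2 = two31 - q2 * ad;
  uint32_t delta;
  do {
    ++p;
    q1 = 2 * q1; r1 = 2 * r1;
    if (r1 >= anc) { ++q1; r1 -= anc; }
    q2 = 2 * q2; r2 = 2 * r2;
    if (r2 >= ad) { ++q2; r2 -= ad; }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));
  Magic m;
  m.M = int32_t(q2 + 1);
  if (d < 0) m.M = -m.M;
  m.s = p - 32;
  return m;
}

// n / 3435 in the shape the SI checks expect. For d = 3435 the derivation
// should yield the negative multiplier 0x98a1930b from the check line.
static int32_t sdiv3435(int32_t n) {
  Magic m = magic(3435);
  int32_t q = int32_t((int64_t(m.M) * n) >> 32);  // V_MUL_HI_I32
  q = int32_t(uint32_t(q) + uint32_t(n));         // V_ADD_I32 (M < 0, d > 0)
  q >>= m.s;                                      // V_ASHRREV_I32
  return q + int32_t(uint32_t(q) >> 31);          // V_LSHRREV_B32 + V_ADD_I32
}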
+define void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %den_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %num = load <2 x i32> addrspace(1) * %in
+ %den = load <2 x i32> addrspace(1) * %den_ptr
+ %result = sdiv <2 x i32> %num, %den
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @sdiv_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %num = load <2 x i32> addrspace(1) * %in
+ %result = sdiv <2 x i32> %num, <i32 4, i32 4>
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %den_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %num = load <4 x i32> addrspace(1) * %in
+ %den = load <4 x i32> addrspace(1) * %den_ptr
+ %result = sdiv <4 x i32> %num, %den
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %num = load <4 x i32> addrspace(1) * %in
+ %result = sdiv <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; Tests for 64-bit divide bypass.
+; define void @test_get_quotient(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; %result = sdiv i64 %a, %b
+; store i64 %result, i64 addrspace(1)* %out, align 8
+; ret void
+; }
+
+; define void @test_get_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; %result = srem i64 %a, %b
+; store i64 %result, i64 addrspace(1)* %out, align 8
+; ret void
+; }
+
+; define void @test_get_quotient_and_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; %resultdiv = sdiv i64 %a, %b
+; %resultrem = srem i64 %a, %b
+; %result = add i64 %resultdiv, %resultrem
+; store i64 %result, i64 addrspace(1)* %out, align 8
+; ret void
+; }
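
The commented-out tests above refer to the 64-bit divide bypass: when both operands turn out to fit in 32 bits at run time, a cheap 32-bit divide can stand in for the long 64-bit expansion. A hedged C++ sketch of the idea (the guard LLVM's bypass actually inserts differs in detail; the b != -1 test here just keeps the INT32_MIN / -1 corner out of the narrow path):

#include <cstdint>

static int64_t sdiv_bypass(int64_t a, int64_t b) {
  // If sign-extending the low words reproduces both operands, the values
  // fit the 32-bit path.
  if (int32_t(a) == a && int32_t(b) == b && b != -1)
    return int32_t(a) / int32_t(b);  // fast 32-bit divide
  return a / b;                      // full 64-bit expansion
}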
diff --git a/test/CodeGen/R600/setcc-equivalent.ll b/test/CodeGen/R600/setcc-equivalent.ll
index 4c50aa3..f796748 100644
--- a/test/CodeGen/R600/setcc-equivalent.ll
+++ b/test/CodeGen/R600/setcc-equivalent.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
+; XFAIL: *
; EG-LABEL: @and_setcc_setcc_i32
; EG: AND_INT
diff --git a/test/CodeGen/R600/sgpr-copy.ll b/test/CodeGen/R600/sgpr-copy.ll
index c581d86..c7d5bf9 100644
--- a/test/CodeGen/R600/sgpr-copy.ll
+++ b/test/CodeGen/R600/sgpr-copy.ll
@@ -70,7 +70,7 @@ main_body:
%55 = fadd float %54, %53
%56 = fmul float %45, %45
%57 = fadd float %55, %56
- %58 = call float @llvm.AMDGPU.rsq(float %57)
+ %58 = call float @llvm.AMDGPU.rsq.f32(float %57)
%59 = fmul float %43, %58
%60 = fmul float %44, %58
%61 = fmul float %45, %58
@@ -212,7 +212,7 @@ declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
declare <4 x float> @llvm.SI.sample.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32) #1
; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #3
+declare float @llvm.AMDGPU.rsq.f32(float) #3
; Function Attrs: readnone
declare float @llvm.AMDIL.exp.(float) #3
diff --git a/test/CodeGen/R600/shl.ll b/test/CodeGen/R600/shl.ll
index 4a6aab4..43fab2a 100644
--- a/test/CodeGen/R600/shl.ll
+++ b/test/CodeGen/R600/shl.ll
@@ -39,5 +39,118 @@ define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
ret void
}
-; XXX: Add SI test for i64 shl once i64 stores and i64 function arguments are
-; supported.
+;EG-CHECK: @shl_i64
+;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+;EG-CHECK: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+;EG-CHECK: LSHR {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: LSHL {{\*? *}}[[HISMTMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], [[SHIFT]]
+;EG-CHECK-DAG: OR_INT {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], {{[[HISMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+;EG-CHECK-DAG: LSHL {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], [[OPLO]], {{PS|[[SHIFT]]}}
+;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
+
+;SI-CHECK: @shl_i64
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %a = load i64 addrspace(1) * %in
+ %b = load i64 addrspace(1) * %b_ptr
+ %result = shl i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK: @shl_v2i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK: @shl_v2i64
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %a = load <2 x i64> addrspace(1) * %in
+ %b = load <2 x i64> addrspace(1) * %b_ptr
+ %result = shl <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK: @shl_v4i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHC]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHD]]
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHC]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHD]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHC]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHD]]
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK: @shl_v4i64
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %a = load <4 x i64> addrspace(1) * %in
+ %b = load <4 x i64> addrspace(1) * %b_ptr
+ %result = shl <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
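
The EG checks for @shl_i64 above spell out the textbook expansion of a 64-bit shift on a 32-bit ALU; the sra.ll and srl.ll checks later in this diff follow the same skeleton with the directions mirrored. A C++ model of the dataflow with names matching the FileCheck captures (a sketch: it assumes the SUB_INT literal is 31, which is what makes the two-step (lo >> compsh) >> 1 safe at sh == 0, and the "& 31" masks mirror the 5-bit shift units):

#include <cstdint>

static uint64_t shl_i64(uint32_t lo, uint32_t hi, uint32_t sh) { // sh in [0, 63]
  uint32_t compsh = (31 - sh) & 31;              // SUB_INT   [[COMPSH]]
  uint32_t overf  = (lo >> compsh) >> 1;         // LSHR [[TEMP]], LSHR [[OVERF]]
  uint32_t bigsh  = (sh - 32) & 31;              // ADD_INT   [[BIGSH]]
  uint32_t hism   = (hi << (sh & 31)) | overf;   // LSHL + OR_INT  [[HISM]]
  uint32_t losm   = lo << (sh & 31);             // LSHL      [[LOSM]]
  bool     big    = sh > 31;                     // SETGT_UINT [[RESC]]
  uint32_t reslo  = big ? 0u : losm;             // CNDE_INT select, low word
  uint32_t reshi  = big ? (lo << bigsh) : hism;  // CNDE_INT select, high word
  return (uint64_t(reshi) << 32) | reslo;
}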
diff --git a/test/CodeGen/R600/si-sgpr-spill.ll b/test/CodeGen/R600/si-sgpr-spill.ll
index b34a757..53a0965 100644
--- a/test/CodeGen/R600/si-sgpr-spill.ll
+++ b/test/CodeGen/R600/si-sgpr-spill.ll
@@ -203,7 +203,7 @@ main_body:
%198 = fadd float %197, %196
%199 = fmul float %97, %97
%200 = fadd float %198, %199
- %201 = call float @llvm.AMDGPU.rsq(float %200)
+ %201 = call float @llvm.AMDGPU.rsq.f32(float %200)
%202 = fmul float %95, %201
%203 = fmul float %96, %201
%204 = fmul float %202, %29
@@ -384,7 +384,7 @@ IF67: ; preds = %LOOP65
%355 = fadd float %354, %353
%356 = fmul float %352, %352
%357 = fadd float %355, %356
- %358 = call float @llvm.AMDGPU.rsq(float %357)
+ %358 = call float @llvm.AMDGPU.rsq.f32(float %357)
%359 = fmul float %350, %358
%360 = fmul float %351, %358
%361 = fmul float %352, %358
@@ -512,7 +512,7 @@ IF67: ; preds = %LOOP65
%483 = fadd float %482, %481
%484 = fmul float %109, %109
%485 = fadd float %483, %484
- %486 = call float @llvm.AMDGPU.rsq(float %485)
+ %486 = call float @llvm.AMDGPU.rsq.f32(float %485)
%487 = fmul float %107, %486
%488 = fmul float %108, %486
%489 = fmul float %109, %486
@@ -541,7 +541,7 @@ IF67: ; preds = %LOOP65
%512 = fadd float %511, %510
%513 = fmul float %97, %97
%514 = fadd float %512, %513
- %515 = call float @llvm.AMDGPU.rsq(float %514)
+ %515 = call float @llvm.AMDGPU.rsq.f32(float %514)
%516 = fmul float %95, %515
%517 = fmul float %96, %515
%518 = fmul float %97, %515
@@ -658,7 +658,7 @@ declare i32 @llvm.SI.tid() #2
declare float @ceil(float) #3
; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #2
+declare float @llvm.AMDGPU.rsq.f32(float) #2
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.SI.sampled.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32) #1
@@ -887,7 +887,7 @@ main_body:
%212 = fadd float %211, %210
%213 = fmul float %209, %209
%214 = fadd float %212, %213
- %215 = call float @llvm.AMDGPU.rsq(float %214)
+ %215 = call float @llvm.AMDGPU.rsq.f32(float %214)
%216 = fmul float %205, %215
%217 = fmul float %207, %215
%218 = fmul float %209, %215
@@ -1123,7 +1123,7 @@ IF189: ; preds = %LOOP
%434 = fsub float -0.000000e+00, %433
%435 = fadd float 0x3FF00068E0000000, %434
%436 = call float @llvm.AMDIL.clamp.(float %435, float 0.000000e+00, float 1.000000e+00)
- %437 = call float @llvm.AMDGPU.rsq(float %436)
+ %437 = call float @llvm.AMDGPU.rsq.f32(float %436)
%438 = fmul float %437, %436
%439 = fsub float -0.000000e+00, %436
%440 = call float @llvm.AMDGPU.cndlt(float %439, float %438, float 0.000000e+00)
@@ -1147,7 +1147,7 @@ IF189: ; preds = %LOOP
%458 = fadd float %457, %456
%459 = fmul float %455, %455
%460 = fadd float %458, %459
- %461 = call float @llvm.AMDGPU.rsq(float %460)
+ %461 = call float @llvm.AMDGPU.rsq.f32(float %460)
%462 = fmul float %451, %461
%463 = fmul float %453, %461
%464 = fmul float %455, %461
@@ -1257,7 +1257,7 @@ ENDIF197: ; preds = %IF189, %IF198
%559 = fadd float %558, %557
%560 = fmul float %556, %556
%561 = fadd float %559, %560
- %562 = call float @llvm.AMDGPU.rsq(float %561)
+ %562 = call float @llvm.AMDGPU.rsq.f32(float %561)
%563 = fmul float %562, %561
%564 = fsub float -0.000000e+00, %561
%565 = call float @llvm.AMDGPU.cndlt(float %564, float %563, float 0.000000e+00)
diff --git a/test/CodeGen/R600/sign_extend.ll b/test/CodeGen/R600/sign_extend.ll
index 1212cee..e3bee50 100644
--- a/test/CodeGen/R600/sign_extend.ll
+++ b/test/CodeGen/R600/sign_extend.ll
@@ -1,12 +1,61 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; SI-LABEL: @s_sext_i1_to_i32:
+; SI: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %cmp = icmp eq i32 %a, %b
+ %sext = sext i1 %cmp to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
-; CHECK: V_ASHR
-define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+; SI-LABEL: @test:
+; SI: V_ASHR
+; SI: S_ENDPGM
+define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
entry:
- %0 = mul i32 %a, %b
- %1 = add i32 %0, %c
- %2 = sext i32 %1 to i64
- store i64 %2, i64 addrspace(1)* %out
+ %mul = mul i32 %a, %b
+ %add = add i32 %mul, %c
+ %sext = sext i32 %add to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @s_sext_i1_to_i64:
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %cmp = icmp eq i32 %a, %b
+ %sext = sext i1 %cmp to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @s_sext_i32_to_i64:
+; SI: S_ASHR_I32
+; SI: S_ENDPGM
+define void @s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a) nounwind {
+ %sext = sext i32 %a to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @v_sext_i32_to_i64:
+; SI: V_ASHR
+; SI: S_ENDPGM
+define void @v_sext_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %sext = sext i32 %val to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @s_sext_i16_to_i64:
+; SI: S_ENDPGM
+define void @s_sext_i16_to_i64(i64 addrspace(1)* %out, i16 %a) nounwind {
+ %sext = sext i16 %a to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
ret void
}
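
What the widening tests above encode: sign-extending i32 to i64 only requires filling the high word with copies of the sign bit, one arithmetic shift by 31 (S_ASHR_I32 when the value is in a scalar register, V_ASHR otherwise); the i1 cases instead materialize 0 or -1 directly with V_CNDMASK. A one-function C++ sketch of the i32 case:

#include <cstdint>

static int64_t sext_i32_to_i64(int32_t lo) {
  uint32_t hi = uint32_t(lo >> 31);  // S_ASHR_I32 / V_ASHR: 0 or all ones
  return int64_t((uint64_t(hi) << 32) | uint32_t(lo));
}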
diff --git a/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll b/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
index d9f60ea..dee4326 100644
--- a/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
+++ b/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
@@ -1,5 +1,7 @@
; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; XFAIL: *
+
; 64-bit select was originally lowered with a build_pair, and this
; could be simplified to 1 cndmask instead of 2, but that broke when
; it started being implemented with a v2i32 build_vector and
@@ -12,9 +14,10 @@ define void @trunc_select_i64(i32 addrspace(1)* %out, i64 %a, i64 %b, i32 %c) {
ret void
}
+; FIXME: Fix truncating store for local memory
; SI-LABEL: @trunc_load_alloca_i64:
-; SI: V_MOVRELS_B32
-; SI-NOT: V_MOVRELS_B32
+; SI: DS_READ_B32
+; SI-NOT: DS_READ_B64
; SI: S_ENDPGM
define void @trunc_load_alloca_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) {
%idx = add i32 %a, %b
diff --git a/test/CodeGen/R600/sint_to_fp.ll b/test/CodeGen/R600/sint_to_fp.ll
index 9241799..b27dfda 100644
--- a/test/CodeGen/R600/sint_to_fp.ll
+++ b/test/CodeGen/R600/sint_to_fp.ll
@@ -29,3 +29,25 @@ define void @sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspac
store <4 x float> %result, <4 x float> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @sint_to_fp_i1_f32:
+; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; SI-NEXT: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, -1.000000e+00, [[CMP]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @sint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = sitofp i1 %cmp to float
+ store float %fp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sint_to_fp_i1_f32_load:
+; SI: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, -1.000000e+00
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @sint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 %in) {
+ %fp = sitofp i1 %in to float
+ store float %fp, float addrspace(1)* %out, align 4
+ ret void
+}
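
The two i1 conversions above need no real int-to-float instruction: a sign-extended i1 is 0 or -1, so sitofp has only two possible results, and the expected code is a compare producing a condition mask plus one V_CNDMASK selecting between 0.0 and -1.0, exactly as the check lines show. In C++ terms (semantics only, not the lowering):

// sitofp i1: true is -1 as a signed value, hence -1.0f.
static float sitofp_i1(bool b) { return b ? -1.0f : 0.0f; }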
diff --git a/test/CodeGen/R600/sint_to_fp64.ll b/test/CodeGen/R600/sint_to_fp64.ll
index 5abc9d1..12b8cf5 100644
--- a/test/CodeGen/R600/sint_to_fp64.ll
+++ b/test/CodeGen/R600/sint_to_fp64.ll
@@ -1,9 +1,35 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; CHECK: @sint_to_fp64
-; CHECK: V_CVT_F64_I32_e32
+; SI: @sint_to_fp64
+; SI: V_CVT_F64_I32_e32
define void @sint_to_fp64(double addrspace(1)* %out, i32 %in) {
%result = sitofp i32 %in to double
store double %result, double addrspace(1)* %out
ret void
}
+
+; SI-LABEL: @sint_to_fp_i1_f64:
+; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; FIXME: The VGPR sources for V_CNDMASK are copied from SGPRs; we should
+; be able to fold the SGPRs into the V_CNDMASK instructions.
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = sitofp i1 %cmp to double
+ store double %fp, double addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @sint_to_fp_i1_f64_load:
+; SI: V_CNDMASK_B32_e64 [[IRESULT:v[0-9]]], 0, -1
+; SI-NEXT: V_CVT_F64_I32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
+; SI: S_ENDPGM
+define void @sint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
+ %fp = sitofp i1 %in to double
+ store double %fp, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/sra.ll b/test/CodeGen/R600/sra.ll
index fe9df10..9eb3dc5 100644
--- a/test/CodeGen/R600/sra.ll
+++ b/test/CodeGen/R600/sra.ll
@@ -52,3 +52,133 @@ entry:
ret void
}
+;EG-CHECK-LABEL: @ashr_i64_2
+;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+;EG-CHECK: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+;EG-CHECK: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: LSHR {{\*? *}}[[LOSMTMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], [[SHIFT]]
+;EG-CHECK-DAG: OR_INT {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], {{[[LOSMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+;EG-CHECK-DAG: ASHR {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+;EG-CHECK-DAG: ASHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], literal
+;EG-CHECK-DAG: ASHR {{\*? *}}[[HIBIG:T[0-9]+\.[XYZW]]], [[OPHI]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+
+;SI-CHECK-LABEL: @ashr_i64_2
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+define void @ashr_i64_2(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+entry:
+ %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %a = load i64 addrspace(1) * %in
+ %b = load i64 addrspace(1) * %b_ptr
+ %result = ashr i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK-LABEL: @ashr_v2i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK-LABEL: @ashr_v2i64
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %a = load <2 x i64> addrspace(1) * %in
+ %b = load <2 x i64> addrspace(1) * %b_ptr
+ %result = ashr <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK-LABEL: @ashr_v4i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHC]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHD]]
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHC]]
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHD]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK-LABEL: @ashr_v4i64
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @ashr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %a = load <4 x i64> addrspace(1) * %in
+ %b = load <4 x i64> addrspace(1) * %b_ptr
+ %result = ashr <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
diff --git a/test/CodeGen/R600/srem.ll b/test/CodeGen/R600/srem.ll
new file mode 100644
index 0000000..65e3395
--- /dev/null
+++ b/test/CodeGen/R600/srem.ll
@@ -0,0 +1,50 @@
+; RUN: llc -march=r600 -mcpu=SI < %s
+; RUN: llc -march=r600 -mcpu=redwood < %s
+
+define void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %num = load i32 addrspace(1) * %in
+ %den = load i32 addrspace(1) * %den_ptr
+ %result = srem i32 %num, %den
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @srem_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %num = load i32 addrspace(1) * %in
+ %result = srem i32 %num, 4
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %den_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %num = load <2 x i32> addrspace(1) * %in
+ %den = load <2 x i32> addrspace(1) * %den_ptr
+ %result = srem <2 x i32> %num, %den
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %num = load <2 x i32> addrspace(1) * %in
+ %result = srem <2 x i32> %num, <i32 4, i32 4>
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %den_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %num = load <4 x i32> addrspace(1) * %in
+ %den = load <4 x i32> addrspace(1) * %den_ptr
+ %result = srem <4 x i32> %num, %den
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %num = load <4 x i32> addrspace(1) * %in
+ %result = srem <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/srl.ll b/test/CodeGen/R600/srl.ll
index 7637355..44ad73f 100644
--- a/test/CodeGen/R600/srl.ll
+++ b/test/CodeGen/R600/srl.ll
@@ -39,3 +39,129 @@ define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+;EG-CHECK: @lshr_i64
+;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+;EG-CHECK: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+;EG-CHECK: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: LSHR {{\*? *}}[[LOSMTMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], [[SHIFT]]
+;EG-CHECK-DAG: OR_INT {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], {{[[LOSMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+;EG-CHECK-DAG: LSHR {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+;EG-CHECK-DAG: LSHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
+
+;SI-CHECK: @lshr_i64
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %a = load i64 addrspace(1) * %in
+ %b = load i64 addrspace(1) * %b_ptr
+ %result = lshr i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK: @lshr_v2i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK: @lshr_v2i64
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %a = load <2 x i64> addrspace(1) * %in
+ %b = load <2 x i64> addrspace(1) * %b_ptr
+ %result = lshr <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+
+;EG-CHECK: @lshr_v4i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHC]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHD]]
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHC]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHD]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHC]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHD]]
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK: @lshr_v4i64
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @lshr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %a = load <4 x i64> addrspace(1) * %in
+ %b = load <4 x i64> addrspace(1) * %b_ptr
+ %result = lshr <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/ssubo.ll b/test/CodeGen/R600/ssubo.ll
new file mode 100644
index 0000000..b330276
--- /dev/null
+++ b/test/CodeGen/R600/ssubo.ll
@@ -0,0 +1,64 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s
+
+declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+
+; FUNC-LABEL: @ssubo_i64_zext
+define void @ssubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %ssub, 0
+ %carry = extractvalue { i64, i1 } %ssub, 1
+ %ext = zext i1 %carry to i64
+ %add2 = add i64 %val, %ext
+ store i64 %add2, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_ssubo_i32
+define void @s_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+ %ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %ssub, 0
+ %carry = extractvalue { i32, i1 } %ssub, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_ssubo_i32
+define void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %ssub, 0
+ %carry = extractvalue { i32, i1 } %ssub, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @s_ssubo_i64
+; SI: S_SUB_I32
+; SI: S_SUBB_U32
+define void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+ %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %ssub, 0
+ %carry = extractvalue { i64, i1 } %ssub, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_ssubo_i64
+; SI: V_SUB_I32_e32
+; SI: V_SUBB_U32_e32
+define void @v_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %a = load i64 addrspace(1)* %aptr, align 4
+ %b = load i64 addrspace(1)* %bptr, align 4
+ %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %ssub, 0
+ %carry = extractvalue { i64, i1 } %ssub, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
diff --git a/test/CodeGen/R600/store.ll b/test/CodeGen/R600/store.ll
index c0c8ccc..dd27533 100644
--- a/test/CodeGen/R600/store.ll
+++ b/test/CodeGen/R600/store.ll
@@ -263,8 +263,7 @@ entry:
; CM-CHECK: LDS_WRITE
; CM-CHECK: LDS_WRITE
; SI-CHECK-LABEL: @store_local_v2i32
-; SI-CHECK: DS_WRITE_B32
-; SI-CHECK: DS_WRITE_B32
+; SI-CHECK: DS_WRITE_B64
define void @store_local_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> %in) {
entry:
store <2 x i32> %in, <2 x i32> addrspace(3)* %out
diff --git a/test/CodeGen/R600/sub.ll b/test/CodeGen/R600/sub.ll
index e321ed6..8e64148 100644
--- a/test/CodeGen/R600/sub.ll
+++ b/test/CodeGen/R600/sub.ll
@@ -1,5 +1,7 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG --check-prefix=FUNC %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+;RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+;RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.r600.read.tidig.x() readnone
;FUNC-LABEL: @test2
;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -37,23 +39,37 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
ret void
}
-;FUNC_LABEL: @test5
+; FUNC-LABEL: @s_sub_i64:
+; SI: S_SUB_I32
+; SI: S_SUBB_U32
-;EG-DAG: SETGE_UINT
-;EG-DAG: CNDE_INT
-;EG-DAG: SUB_INT
-;EG-DAG: SUB_INT
-;EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: CNDE_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+define void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind {
+ %result = sub i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
-;SI: S_XOR_B64
-;SI-DAG: S_ADD_I32
-;SI-DAG: S_ADDC_U32
-;SI-DAG: S_ADD_I32
-;SI-DAG: S_ADDC_U32
+; FUNC-LABEL: @v_sub_i64:
+; SI: V_SUB_I32_e32
+; SI: V_SUBB_U32_e32
-define void @test5(i64 addrspace(1)* %out, i64 %a, i64 %b) {
-entry:
- %0 = sub i64 %a, %b
- store i64 %0, i64 addrspace(1)* %out
+; EG-DAG: SETGE_UINT
+; EG-DAG: CNDE_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+define void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr i64 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i64 addrspace(1)* %inB, i32 %tid
+ %a = load i64 addrspace(1)* %a_ptr
+ %b = load i64 addrspace(1)* %b_ptr
+ %result = sub i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out, align 8
ret void
}
diff --git a/test/CodeGen/R600/uaddo.ll b/test/CodeGen/R600/uaddo.ll
index 3b69687..a80e502 100644
--- a/test/CodeGen/R600/uaddo.ll
+++ b/test/CodeGen/R600/uaddo.ll
@@ -1,8 +1,10 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s
+declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
-; SI-LABEL: @uaddo_i64_zext
+; FUNC-LABEL: @uaddo_i64_zext
; SI: ADD
; SI: ADDC
; SI: ADDC
@@ -15,3 +17,53 @@ define void @uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
store i64 %add2, i64 addrspace(1)* %out, align 8
ret void
}
+
+; FUNC-LABEL: @s_uaddo_i32
+; SI: S_ADD_I32
+define void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+ %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %uadd, 0
+ %carry = extractvalue { i32, i1 } %uadd, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_uaddo_i32
+; SI: V_ADD_I32
+define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %uadd, 0
+ %carry = extractvalue { i32, i1 } %uadd, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @s_uaddo_i64
+; SI: S_ADD_I32
+; SI: S_ADDC_U32
+define void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+ %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %uadd, 0
+ %carry = extractvalue { i64, i1 } %uadd, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_uaddo_i64
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
+define void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %a = load i64 addrspace(1)* %aptr, align 4
+ %b = load i64 addrspace(1)* %bptr, align 4
+ %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %uadd, 0
+ %carry = extractvalue { i64, i1 } %uadd, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
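
The i64 cases above (and likewise the S_SUB_I32/S_SUBB_U32 pair in sub.ll earlier in this diff) are carry-chain expansions: add the low words, feed the carry into the high-word add, and the carry out of the high word is the overflow flag. A self-contained C++ model (a sketch; the struct and names are ours, not LLVM's):

#include <cstdint>

struct UAddO64 { uint64_t val; bool carry; };

static UAddO64 uaddo_i64(uint64_t a, uint64_t b) {
  uint32_t lo  = uint32_t(a) + uint32_t(b);    // S_ADD_I32 / V_ADD_I32
  uint32_t cin = lo < uint32_t(a);             // carry out of low word
  uint64_t hi  = (a >> 32) + (b >> 32) + cin;  // S_ADDC_U32 / V_ADDC_U32
  return { (hi << 32) | lo, (hi >> 32) != 0 }; // bit 32 of hi = overflow
}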
diff --git a/test/CodeGen/R600/udivrem.ll b/test/CodeGen/R600/udivrem.ll
new file mode 100644
index 0000000..5f5753a
--- /dev/null
+++ b/test/CodeGen/R600/udivrem.ll
@@ -0,0 +1,358 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG --check-prefix=FUNC %s
+
+; FUNC-LABEL: @test_udivrem
+; EG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG: CNDE_INT
+; EG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG: CNDE_INT
+; EG: MULHI
+; EG: MULLO_INT
+; EG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+
+; SI: V_RCP_IFLAG_F32_e32 [[RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[RCP_HI:v[0-9]+]], [[RCP]]
+; SI-DAG: V_MUL_LO_I32 [[RCP_LO:v[0-9]+]], [[RCP]]
+; SI-DAG: V_SUB_I32_e32 [[NEG_RCP_LO:v[0-9]+]], 0, [[RCP_LO]]
+; SI: V_CNDMASK_B32_e64
+; SI: V_MUL_HI_U32 [[E:v[0-9]+]], {{v[0-9]+}}, [[RCP]]
+; SI-DAG: V_ADD_I32_e32 [[RCP_A_E:v[0-9]+]], [[E]], [[RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[RCP_S_E:v[0-9]+]], [[E]], [[RCP]]
+; SI: V_CNDMASK_B32_e64
+; SI: V_MUL_HI_U32 [[Quotient:v[0-9]+]]
+; SI: V_MUL_LO_I32 [[Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI: V_AND_B32_e32 [[Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[Quotient_A_One:v[0-9]+]], 1, [[Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @test_udivrem(i32 addrspace(1)* %out, i32 %x, i32 %y) {
+ %result0 = udiv i32 %x, %y
+ store i32 %result0, i32 addrspace(1)* %out
+ %result1 = urem i32 %x, %y
+ store i32 %result1, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test_udivrem_v2
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[FIRST_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[FIRST_RCP_HI:v[0-9]+]], [[FIRST_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[FIRST_RCP_LO:v[0-9]+]], [[FIRST_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[FIRST_NEG_RCP_LO:v[0-9]+]], 0, [[FIRST_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FIRST_E:v[0-9]+]], {{v[0-9]+}}, [[FIRST_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[FIRST_RCP_A_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_RCP_S_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FIRST_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[FIRST_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[FIRST_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FIRST_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[FIRST_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[FIRST_Quotient_A_One:v[0-9]+]], {{.*}}, [[FIRST_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[FIRST_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[SECOND_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[SECOND_RCP_HI:v[0-9]+]], [[SECOND_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[SECOND_RCP_LO:v[0-9]+]], [[SECOND_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[SECOND_NEG_RCP_LO:v[0-9]+]], 0, [[SECOND_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[SECOND_E:v[0-9]+]], {{v[0-9]+}}, [[SECOND_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[SECOND_RCP_A_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_RCP_S_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[SECOND_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[SECOND_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[SECOND_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[SECOND_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[SECOND_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[SECOND_Quotient_A_One:v[0-9]+]], {{.*}}, [[SECOND_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[SECOND_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) {
+ %result0 = udiv <2 x i32> %x, %y
+ store <2 x i32> %result0, <2 x i32> addrspace(1)* %out
+ %result1 = urem <2 x i32> %x, %y
+ store <2 x i32> %result1, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test_udivrem_v4
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[FIRST_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[FIRST_RCP_HI:v[0-9]+]], [[FIRST_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[FIRST_RCP_LO:v[0-9]+]], [[FIRST_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[FIRST_NEG_RCP_LO:v[0-9]+]], 0, [[FIRST_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FIRST_E:v[0-9]+]], {{v[0-9]+}}, [[FIRST_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[FIRST_RCP_A_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_RCP_S_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FIRST_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[FIRST_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[FIRST_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FIRST_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[FIRST_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[FIRST_Quotient_A_One:v[0-9]+]], {{.*}}, [[FIRST_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[FIRST_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[SECOND_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[SECOND_RCP_HI:v[0-9]+]], [[SECOND_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[SECOND_RCP_LO:v[0-9]+]], [[SECOND_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[SECOND_NEG_RCP_LO:v[0-9]+]], 0, [[SECOND_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[SECOND_E:v[0-9]+]], {{v[0-9]+}}, [[SECOND_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[SECOND_RCP_A_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_RCP_S_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[SECOND_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[SECOND_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[SECOND_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[SECOND_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[SECOND_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[SECOND_Quotient_A_One:v[0-9]+]], {{.*}}, [[SECOND_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[SECOND_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[THIRD_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[THIRD_RCP_HI:v[0-9]+]], [[THIRD_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[THIRD_RCP_LO:v[0-9]+]], [[THIRD_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[THIRD_NEG_RCP_LO:v[0-9]+]], 0, [[THIRD_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[THIRD_E:v[0-9]+]], {{v[0-9]+}}, [[THIRD_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[THIRD_RCP_A_E:v[0-9]+]], [[THIRD_E]], [[THIRD_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[THIRD_RCP_S_E:v[0-9]+]], [[THIRD_E]], [[THIRD_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[THIRD_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[THIRD_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[THIRD_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[THIRD_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[THIRD_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[THIRD_Quotient_A_One:v[0-9]+]], {{.*}}, [[THIRD_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[THIRD_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[THIRD_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[THIRD_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[FOURTH_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[FOURTH_RCP_HI:v[0-9]+]], [[FOURTH_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[FOURTH_RCP_LO:v[0-9]+]], [[FOURTH_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[FOURTH_NEG_RCP_LO:v[0-9]+]], 0, [[FOURTH_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FOURTH_E:v[0-9]+]], {{v[0-9]+}}, [[FOURTH_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[FOURTH_RCP_A_E:v[0-9]+]], [[FOURTH_E]], [[FOURTH_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[FOURTH_RCP_S_E:v[0-9]+]], [[FOURTH_E]], [[FOURTH_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FOURTH_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[FOURTH_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[FOURTH_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FOURTH_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[FOURTH_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[FOURTH_Quotient_A_One:v[0-9]+]], {{.*}}, [[FOURTH_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[FOURTH_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[FOURTH_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[FOURTH_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @test_udivrem_v4(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
+ %result0 = udiv <4 x i32> %x, %y
+ store <4 x i32> %result0, <4 x i32> addrspace(1)* %out
+ %result1 = urem <4 x i32> %x, %y
+ store <4 x i32> %result1, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/uint_to_fp.f64.ll b/test/CodeGen/R600/uint_to_fp.f64.ll
index 75150c2..9a41796 100644
--- a/test/CodeGen/R600/uint_to_fp.f64.ll
+++ b/test/CodeGen/R600/uint_to_fp.f64.ll
@@ -2,8 +2,35 @@
; SI-LABEL: @uint_to_fp_f64_i32
; SI: V_CVT_F64_U32_e32
+; SI: S_ENDPGM
define void @uint_to_fp_f64_i32(double addrspace(1)* %out, i32 %in) {
%cast = uitofp i32 %in to double
store double %cast, double addrspace(1)* %out, align 8
ret void
}
+
+; SI-LABEL: @uint_to_fp_i1_f64:
+; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; FIXME: The VGPR sources for V_CNDMASK are copied from SGPRs; we should
+; be able to fold the SGPRs into the V_CNDMASK instructions.
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @uint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = uitofp i1 %cmp to double
+ store double %fp, double addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @uint_to_fp_i1_f64_load:
+; SI: V_CNDMASK_B32_e64 [[IRESULT:v[0-9]]], 0, 1
+; SI-NEXT: V_CVT_F64_U32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
+; SI: S_ENDPGM
+define void @uint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
+ %fp = uitofp i1 %in to double
+ store double %fp, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/uint_to_fp.ll b/test/CodeGen/R600/uint_to_fp.ll
index a5ac355..8f5d42d 100644
--- a/test/CodeGen/R600/uint_to_fp.ll
+++ b/test/CodeGen/R600/uint_to_fp.ll
@@ -1,28 +1,30 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; R600-CHECK-LABEL: @uint_to_fp_v2i32
-; R600-CHECK-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
-; R600-CHECK-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
-; SI-CHECK-LABEL: @uint_to_fp_v2i32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
+; FUNC-LABEL: @uint_to_fp_v2i32
+; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
+; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
+
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: S_ENDPGM
define void @uint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) {
%result = uitofp <2 x i32> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
-; R600-CHECK-LABEL: @uint_to_fp_v4i32
-; R600-CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-CHECK-LABEL: @uint_to_fp_v4i32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
+; FUNC-LABEL: @uint_to_fp_v4i32
+; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: S_ENDPGM
define void @uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%value = load <4 x i32> addrspace(1) * %in
%result = uitofp <4 x i32> %value to <4 x float>
@@ -30,17 +32,39 @@ define void @uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspac
ret void
}
-; R600-CHECK-LABEL: @uint_to_fp_i64_f32
-; R600-CHECK: UINT_TO_FLT
-; R600-CHECK: UINT_TO_FLT
-; R600-CHECK: MULADD_IEEE
-; SI-CHECK-LABEL: @uint_to_fp_i64_f32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_MAD_F32
+; FUNC-LABEL: @uint_to_fp_i64_f32
+; R600: UINT_TO_FLT
+; R600: UINT_TO_FLT
+; R600: MULADD_IEEE
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: V_MAD_F32
+; SI: S_ENDPGM
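+; An i64 source is converted as hi*2^32 + lo: two u32-to-float converts
+; feed a fused multiply-add (MULADD_IEEE / V_MAD_F32) with 2^32.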
define void @uint_to_fp_i64_f32(float addrspace(1)* %out, i64 %in) {
entry:
%0 = uitofp i64 %in to float
store float %0, float addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @uint_to_fp_i1_f32:
+; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; SI-NEXT: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, 1.000000e+00, [[CMP]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @uint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = uitofp i1 %cmp to float
+ store float %fp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @uint_to_fp_i1_f32_load:
+; SI: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, 1.000000e+00
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @uint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 %in) {
+ %fp = uitofp i1 %in to float
+ store float %fp, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/usubo.ll b/test/CodeGen/R600/usubo.ll
new file mode 100644
index 0000000..d57a2c7
--- /dev/null
+++ b/test/CodeGen/R600/usubo.ll
@@ -0,0 +1,66 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s
+
+declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+
+; FUNC-LABEL: @usubo_i64_zext
+define void @usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %usub, 0
+ %carry = extractvalue { i64, i1 } %usub, 1
+ %ext = zext i1 %carry to i64
+ %add2 = add i64 %val, %ext
+ store i64 %add2, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_usubo_i32
+; SI: S_SUB_I32
+define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+ %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %usub, 0
+ %carry = extractvalue { i32, i1 } %usub, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_usubo_i32
+; SI: V_SUBREV_I32_e32
+define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %usub, 0
+ %carry = extractvalue { i32, i1 } %usub, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @s_usubo_i64
+; SI: S_SUB_I32
+; SI: S_SUBB_U32
+define void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %usub, 0
+ %carry = extractvalue { i64, i1 } %usub, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_usubo_i64
+; SI: V_SUB_I32
+; SI: V_SUBB_U32
+define void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %a = load i64 addrspace(1)* %aptr, align 4
+ %b = load i64 addrspace(1)* %bptr, align 4
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %usub, 0
+ %carry = extractvalue { i64, i1 } %usub, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
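+
+; For reference, the overflow intrinsics used above are equivalent to this
+; manual expansion (informal sketch, not checked by the RUN lines):
+;   %val = sub i32 %a, %b
+;   %carry = icmp ult i32 %a, %b   ; unsigned subtraction borrows iff a < b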
diff --git a/test/CodeGen/R600/vector-alloca.ll b/test/CodeGen/R600/vector-alloca.ll
new file mode 100644
index 0000000..6543f6d
--- /dev/null
+++ b/test/CodeGen/R600/vector-alloca.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+
+; FUNC-LABEL: @vector_read
+; EG: MOV
+; EG: MOV
+; EG: MOV
+; EG: MOV
+; EG: MOVA_INT
+define void @vector_read(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = alloca [4 x i32]
+ %x = getelementptr [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ store i32 0, i32* %x
+ store i32 1, i32* %y
+ store i32 2, i32* %z
+ store i32 3, i32* %w
+ %1 = getelementptr [4 x i32]* %0, i32 0, i32 %index
+ %2 = load i32* %1
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @vector_write
+; EG: MOV
+; EG: MOV
+; EG: MOV
+; EG: MOV
+; EG: MOVA_INT
+; EG: MOVA_INT
+define void @vector_write(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
+entry:
+ %0 = alloca [4 x i32]
+ %x = getelementptr [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ store i32 0, i32* %x
+ store i32 0, i32* %y
+ store i32 0, i32* %z
+ store i32 0, i32* %w
+ %1 = getelementptr [4 x i32]* %0, i32 0, i32 %w_index
+ store i32 1, i32* %1
+ %2 = getelementptr [4 x i32]* %0, i32 0, i32 %r_index
+ %3 = load i32* %2
+ store i32 %3, i32 addrspace(1)* %out
+ ret void
+}
+
+; This test should be optimized to:
+; store i32 0, i32 addrspace(1)* %out
+; FUNC-LABEL: @bitcast_gep
+; EG: STORE_RAW
+define void @bitcast_gep(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
+entry:
+ %0 = alloca [4 x i32]
+ %x = getelementptr [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ store i32 0, i32* %x
+ store i32 0, i32* %y
+ store i32 0, i32* %z
+ store i32 0, i32* %w
+ %1 = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %2 = bitcast i32* %1 to [4 x i32]*
+ %3 = getelementptr [4 x i32]* %2, i32 0, i32 0
+ %4 = load i32* %3
+ store i32 %4, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/xor.ll b/test/CodeGen/R600/xor.ll
index 5a5c86d..ab618cf 100644
--- a/test/CodeGen/R600/xor.ll
+++ b/test/CodeGen/R600/xor.ll
@@ -90,3 +90,69 @@ define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32
store i32 %result, i32 addrspace(1)* %out
ret void
}
+
+; SI-CHECK-LABEL: @vector_xor_i64
+; SI-CHECK: V_XOR_B32_e32
+; SI-CHECK: V_XOR_B32_e32
+; SI-CHECK: S_ENDPGM
+define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
+ %a = load i64 addrspace(1)* %in0
+ %b = load i64 addrspace(1)* %in1
+ %result = xor i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @scalar_xor_i64
+; SI-CHECK: S_XOR_B64
+; SI-CHECK: S_ENDPGM
+define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+ %result = xor i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @scalar_not_i64
+; SI-CHECK: S_NOT_B64
+define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
+ %result = xor i64 %a, -1
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @vector_not_i64
+; SI-CHECK: V_NOT_B32
+; SI-CHECK: V_NOT_B32
+define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
+ %a = load i64 addrspace(1)* %in0
+ %b = load i64 addrspace(1)* %in1
+ %result = xor i64 %a, -1
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; Test that we have a pattern to match xor inside a branch.
+; Note that in the future the backend may be smart enough to
+; use an SALU instruction for this.
+
+; SI-CHECK-LABEL: @xor_cf
+; SI-CHECK: V_XOR
+; SI-CHECK: V_XOR
+define void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
+entry:
+ %0 = icmp eq i64 %a, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = xor i64 %a, %b
+ br label %endif
+
+else:
+ %2 = load i64 addrspace(1)* %in
+ br label %endif
+
+endif:
+ %3 = phi i64 [%1, %if], [%2, %else]
+ store i64 %3, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/SPARC/atomics.ll b/test/CodeGen/SPARC/atomics.ll
index 5e41300..ee6c1f8 100644
--- a/test/CodeGen/SPARC/atomics.ll
+++ b/test/CodeGen/SPARC/atomics.ll
@@ -38,7 +38,8 @@ entry:
define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
entry:
- %b = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
+ %pair = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
+ %b = extractvalue { i32, i1 } %pair, 0
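+  ; cmpxchg now yields a { i32, i1 } pair: field 0 is the value loaded,
+  ; field 1 the success flag; extractvalue recovers the old-style result.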
ret i32 %b
}
@@ -48,7 +49,8 @@ entry:
define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
entry:
- %b = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic monotonic
+ %pair = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic monotonic
+ %b = extractvalue { i64, i1 } %pair, 0
ret i64 %b
}
diff --git a/test/CodeGen/SPARC/lit.local.cfg b/test/CodeGen/SPARC/lit.local.cfg
index 4d344fa..fa6a54e 100644
--- a/test/CodeGen/SPARC/lit.local.cfg
+++ b/test/CodeGen/SPARC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Sparc' in targets:
+if not 'Sparc' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/SystemZ/Large/lit.local.cfg b/test/CodeGen/SystemZ/Large/lit.local.cfg
index 9a02f84..4f22a97 100644
--- a/test/CodeGen/SystemZ/Large/lit.local.cfg
+++ b/test/CodeGen/SystemZ/Large/lit.local.cfg
@@ -5,6 +5,5 @@ config.suffixes = ['.py']
if config.root.host_arch not in ['SystemZ']:
config.unsupported = True
-targets = set(config.root.targets_to_build.split())
-if not 'SystemZ' in targets:
+if not 'SystemZ' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/SystemZ/cmpxchg-01.ll b/test/CodeGen/SystemZ/cmpxchg-01.ll
index bb0b18a..5118aad 100644
--- a/test/CodeGen/SystemZ/cmpxchg-01.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-01.ll
@@ -32,7 +32,8 @@ define i8 @f1(i8 %dummy, i8 *%src, i8 %cmp, i8 %swap) {
; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
; CHECK-SHIFT: rll
; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -8([[NEGSHIFT]])
- %res = cmpxchg i8 *%src, i8 %cmp, i8 %swap seq_cst seq_cst
+ %pair = cmpxchg i8 *%src, i8 %cmp, i8 %swap seq_cst seq_cst
+ %res = extractvalue { i8, i1 } %pair, 0
ret i8 %res
}
@@ -50,6 +51,7 @@ define i8 @f2(i8 *%src) {
; CHECK-SHIFT: risbg
; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 55, 0
; CHECK-SHIFT: br %r14
- %res = cmpxchg i8 *%src, i8 42, i8 88 seq_cst seq_cst
+ %pair = cmpxchg i8 *%src, i8 42, i8 88 seq_cst seq_cst
+ %res = extractvalue { i8, i1 } %pair, 0
ret i8 %res
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-02.ll b/test/CodeGen/SystemZ/cmpxchg-02.ll
index 8d46a8c..9eb0628 100644
--- a/test/CodeGen/SystemZ/cmpxchg-02.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-02.ll
@@ -32,7 +32,8 @@ define i16 @f1(i16 %dummy, i16 *%src, i16 %cmp, i16 %swap) {
; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
; CHECK-SHIFT: rll
; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -16([[NEGSHIFT]])
- %res = cmpxchg i16 *%src, i16 %cmp, i16 %swap seq_cst seq_cst
+ %pair = cmpxchg i16 *%src, i16 %cmp, i16 %swap seq_cst seq_cst
+ %res = extractvalue { i16, i1 } %pair, 0
ret i16 %res
}
@@ -50,6 +51,7 @@ define i16 @f2(i16 *%src) {
; CHECK-SHIFT: risbg
; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 47, 0
; CHECK-SHIFT: br %r14
- %res = cmpxchg i16 *%src, i16 42, i16 88 seq_cst seq_cst
+ %pair = cmpxchg i16 *%src, i16 42, i16 88 seq_cst seq_cst
+ %res = extractvalue { i16, i1 } %pair, 0
ret i16 %res
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-03.ll b/test/CodeGen/SystemZ/cmpxchg-03.ll
index f6a2ad0..c5fab4d 100644
--- a/test/CodeGen/SystemZ/cmpxchg-03.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-03.ll
@@ -7,7 +7,8 @@ define i32 @f1(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK-LABEL: f1:
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -17,7 +18,8 @@ define i32 @f2(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 4092(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 1023
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -27,7 +29,8 @@ define i32 @f3(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, 4096(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 1024
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -37,7 +40,8 @@ define i32 @f4(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, 524284(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 131071
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -49,7 +53,8 @@ define i32 @f5(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 131072
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -59,7 +64,8 @@ define i32 @f6(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, -4(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -1
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -69,7 +75,8 @@ define i32 @f7(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, -524288(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -131072
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -81,7 +88,8 @@ define i32 @f8(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -131073
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -93,7 +101,8 @@ define i32 @f9(i32 %cmp, i32 %swap, i64 %src, i64 %index) {
; CHECK: br %r14
%add1 = add i64 %src, %index
%ptr = inttoptr i64 %add1 to i32 *
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -106,7 +115,8 @@ define i32 @f10(i32 %cmp, i32 %swap, i64 %src, i64 %index) {
%add1 = add i64 %src, %index
%add2 = add i64 %add1, 4096
%ptr = inttoptr i64 %add2 to i32 *
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -116,7 +126,8 @@ define i32 @f11(i32 %dummy, i32 %swap, i32 *%ptr) {
; CHECK: lhi %r2, 1001
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i32 *%ptr, i32 1001, i32 %swap seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 1001, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -126,6 +137,7 @@ define i32 @f12(i32 %cmp, i32 *%ptr) {
; CHECK: lhi [[SWAP:%r[0-9]+]], 1002
; CHECK: cs %r2, [[SWAP]], 0(%r3)
; CHECK: br %r14
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 1002 seq_cst seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 1002 seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-04.ll b/test/CodeGen/SystemZ/cmpxchg-04.ll
index 069bad6..ba1493e 100644
--- a/test/CodeGen/SystemZ/cmpxchg-04.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-04.ll
@@ -7,7 +7,8 @@ define i64 @f1(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK-LABEL: f1:
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
+ %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -17,7 +18,8 @@ define i64 @f2(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 524280(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 65535
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -29,7 +31,8 @@ define i64 @f3(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 65536
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -39,7 +42,8 @@ define i64 @f4(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, -8(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -1
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -49,7 +53,8 @@ define i64 @f5(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, -524288(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -65536
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -61,7 +66,8 @@ define i64 @f6(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -65537
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -73,7 +79,8 @@ define i64 @f7(i64 %cmp, i64 %swap, i64 %src, i64 %index) {
; CHECK: br %r14
%add1 = add i64 %src, %index
%ptr = inttoptr i64 %add1 to i64 *
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -83,7 +90,8 @@ define i64 @f8(i64 %dummy, i64 %swap, i64 *%ptr) {
; CHECK: lghi %r2, 1001
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -93,6 +101,7 @@ define i64 @f9(i64 %cmp, i64 *%ptr) {
; CHECK: lghi [[SWAP:%r[0-9]+]], 1002
; CHECK: csg %r2, [[SWAP]], 0(%r3)
; CHECK: br %r14
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
diff --git a/test/CodeGen/SystemZ/lit.local.cfg b/test/CodeGen/SystemZ/lit.local.cfg
index b12af09..5c02dd3 100644
--- a/test/CodeGen/SystemZ/lit.local.cfg
+++ b/test/CodeGen/SystemZ/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'SystemZ' in targets:
+if not 'SystemZ' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll b/test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll
new file mode 100644
index 0000000..ae66369
--- /dev/null
+++ b/test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mtriple=thumbv6m-eabi -o - | FileCheck %s
+; XFAIL: *
+
+define void @foo(i32* %A) #0 {
+entry:
+; CHECK-LABEL: foo:
+; CHECK: push {r7, lr}
+; CHECK: ldm [[REG0:r[0-9]]]!,
+; CHECK-NEXT: subs [[REG0]]
+; CHECK-NEXT: bl
+ %0 = load i32* %A, align 4
+ %arrayidx1 = getelementptr inbounds i32* %A, i32 1
+ %1 = load i32* %arrayidx1, align 4
+ tail call void @bar(i32* %A, i32 %0, i32 %1) #2
+ ret void
+}
+
+declare void @bar(i32*, i32, i32) #1
diff --git a/test/CodeGen/Thumb/dyn-stackalloc.ll b/test/CodeGen/Thumb/dyn-stackalloc.ll
index 6bc39af..6c6de55 100644
--- a/test/CodeGen/Thumb/dyn-stackalloc.ll
+++ b/test/CodeGen/Thumb/dyn-stackalloc.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin -disable-cgp-branch-opts -disable-post-ra | FileCheck %s -check-prefix=CHECK -check-prefix=RA_GREEDY
-; RUN: llc < %s -mtriple=thumb-apple-darwin -disable-cgp-branch-opts -disable-post-ra -regalloc=basic | FileCheck %s -check-prefix=CHECK -check-prefix=RA_BASIC
+; RUN: llc < %s -mtriple=thumb-apple-darwin -disable-cgp-branch-opts -disable-post-ra | FileCheck %s
+; RUN: llc < %s -mtriple=thumb-apple-darwin -disable-cgp-branch-opts -disable-post-ra -regalloc=basic | FileCheck %s
%struct.state = type { i32, %struct.info*, float**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, i8* }
%struct.info = type { i32, i32, i32, i32, i32, i32, i32, i8* }
@@ -45,8 +45,7 @@ define void @t2(%struct.comment* %vc, i8* %tag, i8* %contents) {
; CHECK: sub sp, #
; CHECK: mov r[[R0:[0-9]+]], sp
; CHECK: str r{{[0-9+]}}, [r[[R0]]
-; RA_GREEDY: str r{{[0-9+]}}, [r[[R0]]
-; RA_BASIC: stm r[[R0]]!
+; CHECK: str r{{[0-9+]}}, [r[[R0]]
; CHECK-NOT: ldr r0, [sp
; CHECK: mov r[[R1:[0-9]+]], sp
; CHECK: subs r[[R2:[0-9]+]], r[[R1]], r{{[0-9]+}}
diff --git a/test/CodeGen/Thumb/fastcc.ll b/test/CodeGen/Thumb/fastcc.ll
new file mode 100644
index 0000000..98ff684
--- /dev/null
+++ b/test/CodeGen/Thumb/fastcc.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -mcpu=arm926ej-s -mattr=+vfp2
+
+; This is a regression test to ensure that fastcc functions are correctly
+; handled when compiling for a processor whose floating-point unit is not
+; accessible from the selected instruction set.
+
+target datalayout = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv5e-none-linux-gnueabi"
+
+; Function Attrs: optsize
+define fastcc void @_foo(float %walpha) #0 {
+entry:
+ br label %for.body13
+
+for.body13: ; preds = %for.body13, %entry
+ br i1 undef, label %for.end182.critedge, label %for.body13
+
+for.end182.critedge: ; preds = %for.body13
+ %conv183 = fpext float %walpha to double
+ %mul184 = fmul double %conv183, 8.200000e-01
+ %conv185 = fptrunc double %mul184 to float
+ %conv188 = fpext float %conv185 to double
+ %mul189 = fmul double %conv188, 6.000000e-01
+ %conv190 = fptrunc double %mul189 to float
+ br label %for.body193
+
+for.body193: ; preds = %for.body193, %for.end182.critedge
+ %mul195 = fmul float %conv190, undef
+ br label %for.body193
+}
+
+attributes #0 = { optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 "}
diff --git a/test/CodeGen/Thumb/lit.local.cfg b/test/CodeGen/Thumb/lit.local.cfg
index 8a3ba96..98c6700 100644
--- a/test/CodeGen/Thumb/lit.local.cfg
+++ b/test/CodeGen/Thumb/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Thumb/thumb-ldm.ll b/test/CodeGen/Thumb/thumb-ldm.ll
index dd98e6f..95f3edc 100644
--- a/test/CodeGen/Thumb/thumb-ldm.ll
+++ b/test/CodeGen/Thumb/thumb-ldm.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=thumbv6m-eabi -o - | FileCheck %s
+; XFAIL: *
@X = external global [0 x i32] ; <[0 x i32]*> [#uses=5]
diff --git a/test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll b/test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll
index 06cfd9b..dedc82b 100644
--- a/test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll
+++ b/test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll
@@ -1,4 +1,5 @@
; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s
+; XFAIL: *
@d = external global [64 x i32]
@s = external global [64 x i32]
diff --git a/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll b/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
index e014453..09e0ed1 100644
--- a/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
+++ b/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim -arm-atomic-cfg-tidy=0 | FileCheck %s
@csize = external global [100 x [20 x [4 x i8]]] ; <[100 x [20 x [4 x i8]]]*> [#uses=1]
@vsize = external global [100 x [20 x [4 x i8]]] ; <[100 x [20 x [4 x i8]]]*> [#uses=1]
diff --git a/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll b/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
index 940cfd1..c8eac8d 100644
--- a/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
+++ b/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi -arm-atomic-cfg-tidy=0 | FileCheck %s
; PR4659
; PR4682
diff --git a/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll b/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
index 52066d3..a9a2478 100644
--- a/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
+++ b/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -disable-cgp-branch-opts | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -disable-cgp-branch-opts -arm-atomic-cfg-tidy=0 | FileCheck %s
%struct.pix_pos = type { i32, i32, i32, i32, i32, i32 }
diff --git a/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll b/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
index 1b8bdb1..8beb5b1 100644
--- a/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
+++ b/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -O3 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 -O3 | FileCheck %s
; rdar://7493908
; Make sure the result of the first dynamic_alloc isn't copied back to sp more
diff --git a/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll b/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
index 810bfb7..f3046e1 100644
--- a/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
+++ b/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O3 -relocation-model=pic | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O3 -relocation-model=pic -arm-atomic-cfg-tidy=0 | FileCheck %s
; rdar://8115404
; Tail merging must not split an IT block.
diff --git a/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll b/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
index 75f5439..3d89390 100644
--- a/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
+++ b/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
@@ -1,5 +1,5 @@
; rdar://8465407
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
%struct.buf = type opaque
diff --git a/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll b/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
index b1ce3bb..240df83 100644
--- a/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
+++ b/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv7-apple-darwin10 < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv7-apple-darwin10 -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
%struct.op = type { %struct.op*, %struct.op*, %struct.op* ()*, i32, i16, i16, i8, i8 }
diff --git a/test/CodeGen/Thumb2/buildvector-crash.ll b/test/CodeGen/Thumb2/buildvector-crash.ll
index 8a3c895..16e2298 100644
--- a/test/CodeGen/Thumb2/buildvector-crash.ll
+++ b/test/CodeGen/Thumb2/buildvector-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 -mcpu=cortex-a8 | FileCheck %s
; Formerly crashed, 3573915.
define void @RotateStarsFP_Vec() nounwind {
diff --git a/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll b/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
index a9f948c..88c7f0f 100644
--- a/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
+++ b/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
define void @fht(float* nocapture %fz, i16 signext %n) nounwind {
; CHECK-LABEL: fht:
diff --git a/test/CodeGen/Thumb2/ldr-str-imm12.ll b/test/CodeGen/Thumb2/ldr-str-imm12.ll
index 36544d1..d20eef0 100644
--- a/test/CodeGen/Thumb2/ldr-str-imm12.ll
+++ b/test/CodeGen/Thumb2/ldr-str-imm12.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-atomic-cfg-tidy=0 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
; rdar://7352504
; Make sure we use "str r9, [sp, #+28]" instead of "sub.w r4, r7, #256" followed by "str r9, [r4, #-32]".
diff --git a/test/CodeGen/Thumb2/lit.local.cfg b/test/CodeGen/Thumb2/lit.local.cfg
index 8a3ba96..98c6700 100644
--- a/test/CodeGen/Thumb2/lit.local.cfg
+++ b/test/CodeGen/Thumb2/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Thumb2/thumb2-branch.ll b/test/CodeGen/Thumb2/thumb2-branch.ll
index a00b22d..332ed50 100644
--- a/test/CodeGen/Thumb2/thumb2-branch.ll
+++ b/test/CodeGen/Thumb2/thumb2-branch.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 -arm-atomic-cfg-tidy=0 | FileCheck %s
; If-conversion defeats the purpose of this test, which is to check
; conditional branch generation, so a call is inserted to make sure it
; doesn't happen and we get actual branches.
diff --git a/test/CodeGen/Thumb2/thumb2-cbnz.ll b/test/CodeGen/Thumb2/thumb2-cbnz.ll
index 893bd0f..f0f7916 100644
--- a/test/CodeGen/Thumb2/thumb2-cbnz.ll
+++ b/test/CodeGen/Thumb2/thumb2-cbnz.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
; rdar://7354379
declare double @foo(double) nounwind readnone
diff --git a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
index 403cd48..a861912 100644
--- a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
+++ b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-default-it | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8-apple-ios -arm-no-restrict-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 -arm-default-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8-apple-ios -arm-atomic-cfg-tidy=0 -arm-no-restrict-it | FileCheck %s
define void @foo(i32 %X, i32 %Y) {
entry:
diff --git a/test/CodeGen/Thumb2/thumb2-ifcvt3.ll b/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
index a71aa3f..79667d4 100644
--- a/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
+++ b/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-default-it | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8-apple-darwin -arm-no-restrict-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-atomic-cfg-tidy=0 -arm-default-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8-apple-darwin -arm-atomic-cfg-tidy=0 -arm-no-restrict-it | FileCheck %s
; There shouldn't be an unconditional branch at end of bb52.
; rdar://7184787
diff --git a/test/CodeGen/Thumb2/thumb2-spill-q.ll b/test/CodeGen/Thumb2/thumb2-spill-q.ll
index 52c1063..94f4725 100644
--- a/test/CodeGen/Thumb2/thumb2-spill-q.ll
+++ b/test/CodeGen/Thumb2/thumb2-spill-q.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-elf -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-elf -mattr=+neon -arm-atomic-cfg-tidy=0 | FileCheck %s
; PR4789
%bar = type { float, float, float }
diff --git a/test/CodeGen/Thumb2/tpsoft.ll b/test/CodeGen/Thumb2/tpsoft.ll
new file mode 100644
index 0000000..6ab8bf0
--- /dev/null
+++ b/test/CodeGen/Thumb2/tpsoft.ll
@@ -0,0 +1,54 @@
+; RUN: llc %s -mtriple=thumbv7-linux-gnueabi -o - | \
+; RUN: FileCheck -check-prefix=ELFASM %s
+; RUN: llc %s -mtriple=thumbebv7-linux-gnueabi -o - | \
+; RUN: FileCheck -check-prefix=ELFASM %s
+; RUN: llc %s -mtriple=thumbv7-linux-gnueabi -filetype=obj -o - | \
+; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=ELFOBJ -check-prefix=ELFOBJ-LE %s
+; RUN: llc %s -mtriple=thumbebv7-linux-gnueabi -filetype=obj -o - | \
+; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=ELFOBJ -check-prefix=ELFOBJ-BE %s
+
+;; Make sure that bl __aeabi_read_tp is materialized and fixed up correctly
+;; in the obj case.
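+;;
+;; With soft TP the thread-local access lowers to a call: bl __aeabi_read_tp
+;; returns the thread pointer in r0, and the static offset of @i is then
+;; added to form the address (informal summary of the expected sequence).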
+
+@i = external thread_local global i32
+@a = external global i8
+@b = external global [10 x i8]
+
+define arm_aapcs_vfpcc i32 @main() nounwind {
+entry:
+ %0 = load i32* @i, align 4
+ switch i32 %0, label %bb2 [
+ i32 12, label %bb
+ i32 13, label %bb1
+ ]
+
+bb: ; preds = %entry
+ %1 = tail call arm_aapcs_vfpcc i32 @foo(i8* @a) nounwind
+ ret i32 %1
+; ELFASM: bl __aeabi_read_tp
+
+
+; ELFOBJ: Sections [
+; ELFOBJ: Section {
+; ELFOBJ: Name: .text
+; ELFOBJ-LE: SectionData (
+;;; BL __aeabi_read_tp is ---------+
+;;; V
+; ELFOBJ-LE-NEXT: 0000: 2DE90048 0E487844 0168FFF7 FEFF4058
+; ELFOBJ-BE: SectionData (
+;;; BL __aeabi_read_tp is ---------+
+;;; V
+; ELFOBJ-BE-NEXT: 0000: E92D4800 480E4478 6801F7FF FFFE5840
+
+
+bb1: ; preds = %entry
+ %2 = tail call arm_aapcs_vfpcc i32 @bar(i32* bitcast ([10 x i8]* @b to i32*)) nounwind
+ ret i32 %2
+
+bb2: ; preds = %entry
+ ret i32 -1
+}
+
+declare arm_aapcs_vfpcc i32 @foo(i8*)
+
+declare arm_aapcs_vfpcc i32 @bar(i32*)
diff --git a/test/CodeGen/Thumb2/v8_IT_3.ll b/test/CodeGen/Thumb2/v8_IT_3.ll
index 4dca246..a028dee 100644
--- a/test/CodeGen/Thumb2/v8_IT_3.ll
+++ b/test/CodeGen/Thumb2/v8_IT_3.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -mtriple=thumbv8 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8 -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
-; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
+; RUN: llc < %s -mtriple=thumbv8 -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7 -arm-atomic-cfg-tidy=0 -arm-restrict-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8 -arm-atomic-cfg-tidy=0 -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
+; RUN: llc < %s -mtriple=thumbv7 -arm-atomic-cfg-tidy=0 -arm-restrict-it -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
%struct.FF = type { i32 (i32*)*, i32 (i32*, i32*, i32, i32, i32, i32)*, i32 (i32, i32, i8*)*, void ()*, i32 (i32, i8*, i32*)*, i32 ()* }
%struct.BD = type { %struct.BD*, i32, i32, i32, i32, i64, i32 (%struct.BD*, i8*, i64, i32)*, i32 (%struct.BD*, i8*, i32, i32)*, i32 (%struct.BD*, i8*, i64, i32)*, i32 (%struct.BD*, i8*, i32, i32)*, i32 (%struct.BD*, i64, i32)*, [16 x i8], i64, i64 }
diff --git a/test/CodeGen/Thumb2/v8_IT_5.ll b/test/CodeGen/Thumb2/v8_IT_5.ll
index 2f352d6..2da75ad 100644
--- a/test/CodeGen/Thumb2/v8_IT_5.ll
+++ b/test/CodeGen/Thumb2/v8_IT_5.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=thumbv8 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8 -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7 -arm-atomic-cfg-tidy=0 -arm-restrict-it | FileCheck %s
; CHECK: it ne
; CHECK-NEXT: cmpne
; CHECK-NEXT: bne [[JUMPTARGET:.LBB[0-9]+_[0-9]+]]
diff --git a/test/CodeGen/X86/2007-05-05-Personality.ll b/test/CodeGen/X86/2007-05-05-Personality.ll
index 5b8fe72..b99c58c 100644
--- a/test/CodeGen/X86/2007-05-05-Personality.ll
+++ b/test/CodeGen/X86/2007-05-05-Personality.ll
@@ -1,12 +1,14 @@
; RUN: llc < %s -mtriple=i686-pc-linux-gnu -o - | FileCheck %s --check-prefix=LIN
-; RUN: llc < %s -mtriple=x86_64-pc-windows-gnu -o - | FileCheck %s --check-prefix=LIN
; RUN: llc < %s -mtriple=i386-pc-mingw32 -o - | FileCheck %s --check-prefix=WIN
; RUN: llc < %s -mtriple=i686-pc-windows-gnu -o - | FileCheck %s --check-prefix=WIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-gnu -o - | FileCheck %s --check-prefix=WIN64
; LIN: .cfi_personality 0, __gnat_eh_personality
; LIN: .cfi_lsda 0, .Lexception0
; WIN: .cfi_personality 0, ___gnat_eh_personality
; WIN: .cfi_lsda 0, Lexception0
+; WIN64: .seh_handler __gnat_eh_personality
+; WIN64: .seh_handlerdata
@error = external global i8
@@ -15,7 +17,7 @@ entry:
invoke void @raise()
to label %eh_then unwind label %unwind
-unwind: ; preds = %entry
+unwind: ; preds = %entry
%eh_ptr = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gnat_eh_personality to i8*)
catch i8* @error
%eh_select = extractvalue { i8*, i32 } %eh_ptr, 1
diff --git a/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll b/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll
deleted file mode 100644
index 0ae1897..0000000
--- a/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep -- -86
-
-define i16 @f(<4 x float>* %tmp116117.i1061.i) nounwind {
-entry:
- alloca [4 x <4 x float>] ; <[4 x <4 x float>]*>:0 [#uses=167]
- alloca [4 x <4 x float>] ; <[4 x <4 x float>]*>:1 [#uses=170]
- alloca [4 x <4 x i32>] ; <[4 x <4 x i32>]*>:2 [#uses=12]
- %.sub6235.i = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0 ; <<4 x float>*> [#uses=76]
- %.sub.i = getelementptr [4 x <4 x float>]* %1, i32 0, i32 0 ; <<4 x float>*> [#uses=59]
-
- %tmp124.i1062.i = getelementptr <4 x float>* %tmp116117.i1061.i, i32 63 ; <<4 x float>*> [#uses=1]
- %tmp125.i1063.i = load <4 x float>* %tmp124.i1062.i ; <<4 x float>> [#uses=5]
- %tmp828.i1077.i = shufflevector <4 x float> %tmp125.i1063.i, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>> [#uses=4]
- %tmp704.i1085.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1]
- %tmp712.i1086.i = call <4 x float> @llvm.x86.sse.max.ps( <4 x float> %tmp704.i1085.i, <4 x float> %tmp828.i1077.i ) ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp712.i1086.i, <4 x float>* %.sub.i
-
- %tmp2587.i1145.gep.i = getelementptr [4 x <4 x float>]* %1, i32 0, i32 0, i32 2 ; <float*> [#uses=1]
- %tmp5334.i = load float* %tmp2587.i1145.gep.i ; <float> [#uses=5]
- %tmp2723.i1170.i = insertelement <4 x float> undef, float %tmp5334.i, i32 2 ; <<4 x float>> [#uses=5]
- store <4 x float> %tmp2723.i1170.i, <4 x float>* %.sub6235.i
-
- %tmp1406.i1367.i = shufflevector <4 x float> %tmp2723.i1170.i, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1]
- %tmp84.i1413.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1]
- %tmp89.i1415.i = fmul <4 x float> %tmp84.i1413.i, %tmp1406.i1367.i ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp89.i1415.i, <4 x float>* %.sub.i
- ret i16 0
-}
-
-declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
diff --git a/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll b/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
index e64375a..a0106d7 100644
--- a/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
+++ b/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
@@ -8,7 +8,7 @@ target triple = "i386-pc-linux-gnu"
@__resp = thread_local global %struct.__res_state* @_res ; <%struct.__res_state**> [#uses=1]
@_res = global %struct.__res_state zeroinitializer, section ".bss" ; <%struct.__res_state*> [#uses=1]
-@__libc_resp = hidden alias %struct.__res_state** @__resp ; <%struct.__res_state**> [#uses=2]
+@__libc_resp = hidden thread_local alias %struct.__res_state** @__resp ; <%struct.__res_state**> [#uses=2]
define i32 @foo() {
; CHECK-LABEL: foo:
diff --git a/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll b/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
index 1259cf4..dfb98bb 100644
--- a/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
+++ b/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
@@ -1,7 +1,7 @@
; RUN: llc -mcpu=generic -mtriple=x86_64-mingw32 < %s | FileCheck %s
; CHECK: subq $40, %rsp
-; CHECK: movaps %xmm8, (%rsp)
-; CHECK: movaps %xmm7, 16(%rsp)
+; CHECK: movaps %xmm8, 16(%rsp)
+; CHECK: movaps %xmm7, (%rsp)
define i32 @a() nounwind {
entry:
diff --git a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
index f9bf310..850f678 100644
--- a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
+++ b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
@@ -11,9 +11,9 @@ entry:
; CHECK: movl 4([[REG]]), %edx
; CHECK: LBB0_1:
; CHECK: movl %eax, %ebx
-; CHECK: addl {{%[a-z]+}}, %ebx
+; CHECK: addl $1, %ebx
; CHECK: movl %edx, %ecx
-; CHECK: adcl {{%[a-z]+}}, %ecx
+; CHECK: adcl $0, %ecx
; CHECK: lock
; CHECK-NEXT: cmpxchg8b ([[REG]])
; CHECK-NEXT: jne
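; The constant increment now folds directly into the cmpxchg8b retry loop:
; addl $1/adcl $0 perform the 64-bit add of 1 on the candidate value instead
; of adding from pre-loaded registers.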
diff --git a/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll b/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
index b45ac22..4181c26 100644
--- a/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
+++ b/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
@@ -24,7 +24,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.lv = !{!0, !14, !15, !16, !17, !24, !25, !28}
!0 = metadata !{i32 786689, metadata !1, metadata !"this", metadata !3, i32 11, metadata !12, i32 0, null} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 786478, metadata !31, metadata !2, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEi", i32 11, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 (%struct.foo*, i32)* @_ZN3foo3bazEi, null, null, null, i32 11} ; [ DW_TAG_subprogram ]
+!1 = metadata !{i32 786478, metadata !31, metadata !2, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEi", i32 11, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 (%struct.foo*, i32)* null, null, null, null, i32 11} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786451, metadata !31, metadata !3, metadata !"foo", i32 3, i64 32, i64 32, i64 0, i32 0, null, metadata !5, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [foo] [line 3, size 32, align 32, offset 0] [def] [from ]
!3 = metadata !{i32 786473, metadata !31} ; [ DW_TAG_file_type ]
!4 = metadata !{i32 786449, metadata !31, i32 4, metadata !"4.2.1 LLVM build", i1 true, metadata !"", i32 0, metadata !32, metadata !32, metadata !33, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
diff --git a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
index f69cedc..ebf51a5 100644
--- a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
+++ b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
@@ -18,7 +18,8 @@ entry:
loop:
; CHECK: lock
; CHECK-NEXT: cmpxchg8b
- %r = cmpxchg i64* %ptr, i64 0, i64 1 monotonic monotonic
+ %pair = cmpxchg i64* %ptr, i64 0, i64 1 monotonic monotonic
+ %r = extractvalue { i64, i1 } %pair, 0
%stored1 = icmp eq i64 %r, 0
br i1 %stored1, label %loop, label %continue
continue:
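
The rewrite above reflects the updated cmpxchg IR: cmpxchg now returns a { value, success } pair instead of a bare value, so the previous memory value has to be recovered with extractvalue. A minimal sketch of the new form (function and value names here are illustrative, not from the patch):

define i64 @cas_old_value(i64* %ptr) {
  %pair = cmpxchg i64* %ptr, i64 0, i64 1 monotonic monotonic
  %old = extractvalue { i64, i1 } %pair, 0    ; the previous memory value
  %ok = extractvalue { i64, i1 } %pair, 1     ; i1 success flag (unused here)
  ret i64 %old
}

The same mechanical rewrite is applied to every cmpxchg in the Atomics-64.ll hunks below.
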
diff --git a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
index f016528..625a351 100644
--- a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
+++ b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
@@ -8,11 +8,11 @@ target triple = "x86_64-apple-darwin10.0.0"
; CHECK: DW_TAG_subprogram
; CHECK: DW_TAG_variable
; CHECK: DW_TAG_variable
+; CHECK-NEXT: DW_AT_location
; CHECK-NEXT: DW_AT_name {{.*}} "z_s"
; CHECK-NEXT: DW_AT_decl_file
; CHECK-NEXT: DW_AT_decl_line
; CHECK-NEXT: DW_AT_type{{.*}}{[[TYPE:.*]]}
-; CHECK-NEXT: DW_AT_location
; CHECK: [[TYPE]]:
; CHECK-NEXT: DW_AT_name {{.*}} "int"
diff --git a/test/CodeGen/X86/2012-11-30-misched-dbg.ll b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
index 650839a..36667de 100644
--- a/test/CodeGen/X86/2012-11-30-misched-dbg.ll
+++ b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
@@ -69,15 +69,15 @@ declare i32 @__sprintf_chk(i8*, i32, i64, i8*, ...)
!1 = metadata !{metadata !2}
!2 = metadata !{}
!4 = metadata !{i32 786688, metadata !5, metadata !"num1", metadata !14, i32 815, metadata !15, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [num1] [line 815]
-!5 = metadata !{i32 786443, metadata !6, i32 815, i32 0, metadata !14, i32 177} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!6 = metadata !{i32 786443, metadata !7, i32 812, i32 0, metadata !14, i32 176} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!7 = metadata !{i32 786443, metadata !8, i32 807, i32 0, metadata !14, i32 175} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!8 = metadata !{i32 786443, metadata !9, i32 440, i32 0, metadata !14, i32 94} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!9 = metadata !{i32 786443, metadata !10, i32 435, i32 0, metadata !14, i32 91} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!10 = metadata !{i32 786443, metadata !11, i32 434, i32 0, metadata !14, i32 90} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!11 = metadata !{i32 786443, metadata !12, i32 250, i32 0, metadata !14, i32 24} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!12 = metadata !{i32 786443, metadata !13, i32 249, i32 0, metadata !14, i32 23} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!13 = metadata !{i32 786443, metadata !2, i32 221, i32 0, metadata !14, i32 19} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!5 = metadata !{i32 786443, metadata !14, metadata !6, i32 815, i32 0, i32 177} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!6 = metadata !{i32 786443, metadata !14, metadata !7, i32 812, i32 0, i32 176} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!7 = metadata !{i32 786443, metadata !14, metadata !8, i32 807, i32 0, i32 175} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!8 = metadata !{i32 786443, metadata !14, metadata !9, i32 440, i32 0, i32 94} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!9 = metadata !{i32 786443, metadata !14, metadata !10, i32 435, i32 0, i32 91} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!10 = metadata !{i32 786443, metadata !14, metadata !11, i32 434, i32 0, i32 90} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!11 = metadata !{i32 786443, metadata !14, metadata !12, i32 250, i32 0, i32 24} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!12 = metadata !{i32 786443, metadata !14, metadata !13, i32 249, i32 0, i32 23} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!13 = metadata !{i32 786443, metadata !14, metadata !2, i32 221, i32 0, i32 19} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
!14 = metadata !{i32 786473, metadata !19} ; [ DW_TAG_file_type ]
!15 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 160, i64 8, i32 0, i32 0, metadata !16, metadata !17, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 160, align 8, offset 0] [from char]
!16 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
diff --git a/test/CodeGen/X86/2014-05-29-factorial.ll b/test/CodeGen/X86/2014-05-29-factorial.ll
new file mode 100644
index 0000000..987a21d
--- /dev/null
+++ b/test/CodeGen/X86/2014-05-29-factorial.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+; CHECK: decq [[X:%rdi|%rcx]]
+; CHECK-NOT: testq [[X]], [[X]]
+
+define i64 @fact2(i64 %x) {
+entry:
+ br label %while.body
+
+while.body:
+ %result.06 = phi i64 [ %mul, %while.body ], [ 1, %entry ]
+ %x.addr.05 = phi i64 [ %dec, %while.body ], [ %x, %entry ]
+ %mul = mul nsw i64 %result.06, %x.addr.05
+ %dec = add nsw i64 %x.addr.05, -1
+ %cmp = icmp sgt i64 %dec, 0
+ br i1 %cmp, label %while.body, label %while.end.loopexit
+
+while.end.loopexit:
+ %mul.lcssa = phi i64 [ %mul, %while.body ]
+ br label %while.end
+
+while.end:
+ %result.0.lcssa = phi i64 [ %mul.lcssa, %while.end.loopexit ]
+ ret i64 %result.0.lcssa
+}
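
The CHECK-NOT above is the point of this test: decq already sets ZF, SF and OF from the decrement, so the signed greater-than-zero loop branch can consume those flags directly and a separate testq is redundant. Roughly (an assembly sketch of the intended output, not verbatim compiler output):

	decq	%rdi		# flags reflect the decremented value
	jg	.LBB0_1		# reuses those flags; no testq needed
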
diff --git a/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll b/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll
new file mode 100644
index 0000000..4580795
--- /dev/null
+++ b/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+; CHECK: addl
+
+; The two additions are the same, but they have different flags.
+; In theory the frontend should never generate this code, but this test
+; checks that two otherwise identical instructions with different flags
+; actually generate two different nodes.
+;
+; Without the flag mismatch the combiner would recognize this pattern
+; and optimize the result of the sub into a register clear
+; (the final result would be 0). With the different flags, though, the
+; combiner has to keep both the add and the sub nodes, because the two
+; adds are distinct nodes and it cannot assume that subtracting one
+; from the other yields 0.
+define i32 @foo(i32 %a, i32 %b) {
+ %1 = add i32 %a, %b
+ %2 = add nsw i32 %a, %b
+ %3 = sub i32 %1, %2
+ ret i32 %3
+}
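
For contrast, a hypothetical variant in which the flags match, so the combiner sees a single add node and is free to fold the difference away (a sketch, not part of the test):

define i32 @foo_same_flags(i32 %a, i32 %b) {
  %1 = add nsw i32 %a, %b
  %2 = add nsw i32 %a, %b        ; same operation, same flags: one node
  %3 = sub i32 %1, %2            ; may fold to ret i32 0
  ret i32 %3
}
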
diff --git a/test/CodeGen/X86/Atomics-64.ll b/test/CodeGen/X86/Atomics-64.ll
index c274688..c392e94 100644
--- a/test/CodeGen/X86/Atomics-64.ll
+++ b/test/CodeGen/X86/Atomics-64.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=x86-64 > %t.x86-64
-; RUN: llc < %s -march=x86 > %t.x86
+; RUN: llc < %s -march=x86 -mattr=cx16 > %t.x86
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin8"
@@ -704,7 +704,8 @@ entry:
%3 = zext i8 %2 to i32
%4 = trunc i32 %3 to i8
%5 = trunc i32 %1 to i8
- %6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
+ %pair6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
+ %6 = extractvalue { i8, i1 } %pair6, 0
store i8 %6, i8* @sc, align 1
%7 = load i8* @sc, align 1
%8 = zext i8 %7 to i32
@@ -712,7 +713,8 @@ entry:
%10 = zext i8 %9 to i32
%11 = trunc i32 %10 to i8
%12 = trunc i32 %8 to i8
- %13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
+ %pair13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
+ %13 = extractvalue { i8, i1 } %pair13, 0
store i8 %13, i8* @uc, align 1
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
@@ -722,7 +724,8 @@ entry:
%19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%20 = trunc i32 %18 to i16
%21 = trunc i32 %16 to i16
- %22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
+ %pair22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
+ %22 = extractvalue { i16, i1 } %pair22, 0
store i16 %22, i16* @ss, align 2
%23 = load i8* @sc, align 1
%24 = sext i8 %23 to i16
@@ -732,49 +735,56 @@ entry:
%28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%29 = trunc i32 %27 to i16
%30 = trunc i32 %25 to i16
- %31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
+ %pair31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
+ %31 = extractvalue { i16, i1 } %pair31, 0
store i16 %31, i16* @us, align 2
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i32
%34 = load i8* @uc, align 1
%35 = zext i8 %34 to i32
%36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
+ %pair37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
+ %37 = extractvalue { i32, i1 } %pair37, 0
store i32 %37, i32* @si, align 4
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i32
%40 = load i8* @uc, align 1
%41 = zext i8 %40 to i32
%42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
+ %pair43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
+ %43 = extractvalue { i32, i1 } %pair43, 0
store i32 %43, i32* @ui, align 4
%44 = load i8* @sc, align 1
%45 = sext i8 %44 to i64
%46 = load i8* @uc, align 1
%47 = zext i8 %46 to i64
%48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
+ %pair49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
+ %49 = extractvalue { i64, i1 } %pair49, 0
store i64 %49, i64* @sl, align 8
%50 = load i8* @sc, align 1
%51 = sext i8 %50 to i64
%52 = load i8* @uc, align 1
%53 = zext i8 %52 to i64
%54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
+ %pair55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
+ %55 = extractvalue { i64, i1 } %pair55, 0
store i64 %55, i64* @ul, align 8
%56 = load i8* @sc, align 1
%57 = sext i8 %56 to i64
%58 = load i8* @uc, align 1
%59 = zext i8 %58 to i64
%60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
- %61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
+ %pair61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
+ %61 = extractvalue { i64, i1 } %pair61, 0
store i64 %61, i64* @sll, align 8
%62 = load i8* @sc, align 1
%63 = sext i8 %62 to i64
%64 = load i8* @uc, align 1
%65 = zext i8 %64 to i64
%66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
- %67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
+ %pair67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
+ %67 = extractvalue { i64, i1 } %pair67, 0
store i64 %67, i64* @ull, align 8
%68 = load i8* @sc, align 1
%69 = zext i8 %68 to i32
@@ -782,7 +792,8 @@ entry:
%71 = zext i8 %70 to i32
%72 = trunc i32 %71 to i8
%73 = trunc i32 %69 to i8
- %74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic monotonic
+ %pair74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic monotonic
+ %74 = extractvalue { i8, i1 } %pair74, 0
%75 = icmp eq i8 %74, %72
%76 = zext i1 %75 to i8
%77 = zext i8 %76 to i32
@@ -793,7 +804,8 @@ entry:
%81 = zext i8 %80 to i32
%82 = trunc i32 %81 to i8
%83 = trunc i32 %79 to i8
- %84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic monotonic
+ %pair84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic monotonic
+ %84 = extractvalue { i8, i1 } %pair84, 0
%85 = icmp eq i8 %84, %82
%86 = zext i1 %85 to i8
%87 = zext i8 %86 to i32
@@ -805,7 +817,8 @@ entry:
%92 = zext i8 %91 to i32
%93 = trunc i32 %92 to i8
%94 = trunc i32 %90 to i8
- %95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic monotonic
+ %pair95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic monotonic
+ %95 = extractvalue { i8, i1 } %pair95, 0
%96 = icmp eq i8 %95, %93
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
@@ -817,7 +830,8 @@ entry:
%103 = zext i8 %102 to i32
%104 = trunc i32 %103 to i8
%105 = trunc i32 %101 to i8
- %106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic monotonic
+ %pair106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic monotonic
+ %106 = extractvalue { i8, i1 } %pair106, 0
%107 = icmp eq i8 %106, %104
%108 = zext i1 %107 to i8
%109 = zext i8 %108 to i32
@@ -828,7 +842,8 @@ entry:
%113 = zext i8 %112 to i32
%114 = trunc i32 %113 to i8
%115 = trunc i32 %111 to i8
- %116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic monotonic
+ %pair116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic monotonic
+ %116 = extractvalue { i8, i1 } %pair116, 0
%117 = icmp eq i8 %116, %114
%118 = zext i1 %117 to i8
%119 = zext i8 %118 to i32
@@ -839,7 +854,8 @@ entry:
%123 = zext i8 %122 to i32
%124 = trunc i32 %123 to i8
%125 = trunc i32 %121 to i8
- %126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic monotonic
+ %pair126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic monotonic
+ %126 = extractvalue { i8, i1 } %pair126, 0
%127 = icmp eq i8 %126, %124
%128 = zext i1 %127 to i8
%129 = zext i8 %128 to i32
@@ -850,7 +866,8 @@ entry:
%133 = zext i8 %132 to i64
%134 = trunc i64 %133 to i8
%135 = trunc i64 %131 to i8
- %136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic monotonic
+ %pair136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic monotonic
+ %136 = extractvalue { i8, i1 } %pair136, 0
%137 = icmp eq i8 %136, %134
%138 = zext i1 %137 to i8
%139 = zext i8 %138 to i32
@@ -861,7 +878,8 @@ entry:
%143 = zext i8 %142 to i64
%144 = trunc i64 %143 to i8
%145 = trunc i64 %141 to i8
- %146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic monotonic
+ %pair146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic monotonic
+ %146 = extractvalue { i8, i1 } %pair146, 0
%147 = icmp eq i8 %146, %144
%148 = zext i1 %147 to i8
%149 = zext i8 %148 to i32
@@ -872,7 +890,8 @@ entry:
%153 = zext i8 %152 to i64
%154 = trunc i64 %153 to i8
%155 = trunc i64 %151 to i8
- %156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic monotonic
+ %pair156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic monotonic
+ %156 = extractvalue { i8, i1 } %pair156, 0
%157 = icmp eq i8 %156, %154
%158 = zext i1 %157 to i8
%159 = zext i8 %158 to i32
@@ -883,7 +902,8 @@ entry:
%163 = zext i8 %162 to i64
%164 = trunc i64 %163 to i8
%165 = trunc i64 %161 to i8
- %166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic monotonic
+ %pair166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic monotonic
+ %166 = extractvalue { i8, i1 } %pair166, 0
%167 = icmp eq i8 %166, %164
%168 = zext i1 %167 to i8
%169 = zext i8 %168 to i32
diff --git a/test/CodeGen/X86/GC/lit.local.cfg b/test/CodeGen/X86/GC/lit.local.cfg
index ba763cf..e71f3cc 100644
--- a/test/CodeGen/X86/GC/lit.local.cfg
+++ b/test/CodeGen/X86/GC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/X86/aliases.ll b/test/CodeGen/X86/aliases.ll
index 8487c60..bf55644 100644
--- a/test/CodeGen/X86/aliases.ll
+++ b/test/CodeGen/X86/aliases.ll
@@ -1,4 +1,20 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=i686-pc-linux-gnu -asm-verbose=false \
+; RUN: -relocation-model=pic | FileCheck %s
+
+@thread_var = thread_local global i32 42, align 4
+@thread_alias = thread_local(localdynamic) alias i32* @thread_var
+
+; CHECK-LABEL: get_thread_var
+define i32* @get_thread_var() {
+; CHECK: leal thread_var@TLSGD
+ ret i32* @thread_var
+}
+
+; CHECK-LABEL: get_thread_alias
+define i32* @get_thread_alias() {
+; CHECK: leal thread_alias@TLSLD
+ ret i32* @thread_alias
+}
@bar = global i32 42
@@ -22,7 +38,7 @@ define i32 @foo_f() {
@bar_i = alias internal i32* @bar
; CHECK-DAG: .globl A
-@A = alias i64, i32* @bar
+@A = alias bitcast (i32* @bar to i64*)
; CHECK-DAG: .globl bar_h
; CHECK-DAG: .hidden bar_h
@@ -32,6 +48,19 @@ define i32 @foo_f() {
; CHECK-DAG: .protected bar_p
@bar_p = protected alias i32* @bar
+; CHECK-DAG: test2 = bar+4
+@test2 = alias getelementptr(i32 *@bar, i32 1)
+
+; CHECK-DAG: test3 = 42
+@test3 = alias inttoptr(i32 42 to i32*)
+
+; CHECK-DAG: test4 = bar
+@test4 = alias inttoptr(i64 ptrtoint (i32* @bar to i64) to i32*)
+
+; CHECK-DAG: test5 = test2-bar
+@test5 = alias inttoptr(i32 sub (i32 ptrtoint (i32* @test2 to i32),
+ i32 ptrtoint (i32* @bar to i32)) to i32*)
+
; CHECK-DAG: .globl test
define i32 @test() {
entry:
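
The alias-expression checks added above encode plain address arithmetic; worked out by hand (the arithmetic is not spelled out in the patch):

  @test2 = getelementptr(i32* @bar, i32 1)              ->  bar + 1*sizeof(i32) = bar+4
  @test4 = inttoptr(ptrtoint(@bar))                     ->  the round-trip folds to bar
  @test5 = inttoptr(ptrtoint(@test2) - ptrtoint(@bar))  ->  test2-bar
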
diff --git a/test/CodeGen/X86/atom-fixup-lea4.ll b/test/CodeGen/X86/atom-fixup-lea4.ll
new file mode 100644
index 0000000..668574b
--- /dev/null
+++ b/test/CodeGen/X86/atom-fixup-lea4.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mcpu=atom -mtriple=x86_64-linux
+
+%struct.ValueWrapper = type { double }
+%struct.ValueWrapper.6 = type { %struct.ValueWrapper.7 }
+%struct.ValueWrapper.7 = type { %struct.ValueWrapper.8 }
+%struct.ValueWrapper.8 = type { %struct.ValueWrapper }
+
+; Function Attrs: uwtable
+define linkonce_odr void @_ZN12ValueWrapperIS_IS_IS_IdEEEEC2Ev(%struct.ValueWrapper.6* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %struct.ValueWrapper.6*, align 8
+ store %struct.ValueWrapper.6* %this, %struct.ValueWrapper.6** %this.addr, align 8
+ %this1 = load %struct.ValueWrapper.6** %this.addr
+ %value = getelementptr inbounds %struct.ValueWrapper.6* %this1, i32 0, i32 0
+ call void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7* %value)
+ ret void
+}
+
+; Function Attrs: uwtable
+declare void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7*) unnamed_addr #0 align 2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/test/CodeGen/X86/atomic-load-store-wide.ll b/test/CodeGen/X86/atomic-load-store-wide.ll
index 17e04f0..7352d5a 100644
--- a/test/CodeGen/X86/atomic-load-store-wide.ll
+++ b/test/CodeGen/X86/atomic-load-store-wide.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=corei7 -march=x86 -verify-machineinstrs | FileCheck %s
; 64-bit load/store on x86-32
; FIXME: The generated code can be substantially improved.
diff --git a/test/CodeGen/X86/atomic-minmax-i6432.ll b/test/CodeGen/X86/atomic-minmax-i6432.ll
index 1cfbc49..ffb7a3f 100644
--- a/test/CodeGen/X86/atomic-minmax-i6432.ll
+++ b/test/CodeGen/X86/atomic-minmax-i6432.ll
@@ -1,6 +1,5 @@
-; RUN: llc -march=x86 -mattr=+cmov -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
-; RUN: llc -march=x86 -mattr=-cmov -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=NOCMOV
-; RUN: llc -march=x86 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC
+; RUN: llc -march=x86 -mattr=+cmov,cx16 -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
+; RUN: llc -march=x86 -mattr=cx16 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC
@sc64 = external global i64
@@ -9,87 +8,39 @@ define void @atomic_maxmin_i6432() {
%1 = atomicrmw max i64* @sc64, i64 5 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setl
-; LINUX: cmpl
-; LINUX: setl
+; LINUX: seta
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setl
-; NOCMOV: cmpl
-; NOCMOV: setl
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%2 = atomicrmw min i64* @sc64, i64 6 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setg
-; LINUX: cmpl
-; LINUX: setg
+; LINUX: setb
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setg
-; NOCMOV: cmpl
-; NOCMOV: setg
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%3 = atomicrmw umax i64* @sc64, i64 7 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setb
-; LINUX: cmpl
-; LINUX: setb
+; LINUX: seta
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setb
-; NOCMOV: cmpl
-; NOCMOV: setb
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%4 = atomicrmw umin i64* @sc64, i64 8 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: seta
-; LINUX: cmpl
-; LINUX: seta
+; LINUX: setb
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: seta
-; NOCMOV: cmpl
-; NOCMOV: seta
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
ret void
}
@@ -98,8 +49,8 @@ define void @atomic_maxmin_i6432() {
define void @tf_bug(i8* %ptr) nounwind {
; PIC-LABEL: tf_bug:
-; PIC: movl _id-L1$pb(
-; PIC: movl (_id-L1$pb)+4(
+; PIC-DAG: movl _id-L1$pb(
+; PIC-DAG: movl (_id-L1$pb)+4(
%tmp1 = atomicrmw add i64* @id, i64 1 seq_cst
%tmp2 = add i64 %tmp1, 1
%tmp3 = bitcast i8* %ptr to i64*
diff --git a/test/CodeGen/X86/atomic-ops-ancient-64.ll b/test/CodeGen/X86/atomic-ops-ancient-64.ll
new file mode 100644
index 0000000..18749b9
--- /dev/null
+++ b/test/CodeGen/X86/atomic-ops-ancient-64.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
+
+define i64 @test_add(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_add:
+; CHECK: calll __sync_fetch_and_add_8
+ %old = atomicrmw add i64* %addr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_sub(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_sub:
+; CHECK: calll __sync_fetch_and_sub_8
+ %old = atomicrmw sub i64* %addr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_and(i64* %andr, i64 %inc) {
+; CHECK-LABEL: test_and:
+; CHECK: calll __sync_fetch_and_and_8
+ %old = atomicrmw and i64* %andr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_or(i64* %orr, i64 %inc) {
+; CHECK-LABEL: test_or:
+; CHECK: calll __sync_fetch_and_or_8
+ %old = atomicrmw or i64* %orr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_xor(i64* %xorr, i64 %inc) {
+; CHECK-LABEL: test_xor:
+; CHECK: calll __sync_fetch_and_xor_8
+ %old = atomicrmw xor i64* %xorr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_nand(i64* %nandr, i64 %inc) {
+; CHECK-LABEL: test_nand:
+; CHECK: calll __sync_fetch_and_nand_8
+ %old = atomicrmw nand i64* %nandr, i64 %inc seq_cst
+ ret i64 %old
+}
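
The RUN line targets plain i386-linux-gnu with no CPU features, where cmpxchg8b cannot be assumed, so each 64-bit atomicrmw lowers to a libcall, which is what the calll checks pin down. At the IR level the callee would be declared roughly as follows (a sketch; the exact signature is defined by the libgcc/compiler-rt runtime):

declare i64 @__sync_fetch_and_add_8(i64*, i64)
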
diff --git a/test/CodeGen/X86/atomic128.ll b/test/CodeGen/X86/atomic128.ll
new file mode 100644
index 0000000..741d290
--- /dev/null
+++ b/test/CodeGen/X86/atomic128.ll
@@ -0,0 +1,316 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9 -verify-machineinstrs -mattr=cx16 | FileCheck %s
+
+@var = global i128 0
+
+define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
+; CHECK-LABEL: val_compare_and_swap:
+; CHECK: movq %rsi, %rax
+; CHECK: movq %rcx, %rbx
+; CHECK: movq %r8, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+ %val = extractvalue { i128, i1 } %pair, 0
+ ret i128 %val
+}
+
+define void @fetch_and_nand(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_nand:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rdx, %rcx
+; CHECK: andq [[INCHI]], %rcx
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent arrives in %rsi, so it makes sense for it to stay there.
+; CHECK: andq %rsi, %rbx
+; CHECK: notq %rbx
+; CHECK: notq %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+ %val = atomicrmw nand i128* %p, i128 %bits release
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_or(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_or:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent arrives in %rsi, so it makes sense for it to stay there.
+; CHECK: orq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: orq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw or i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_add(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_add:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent arrives in %rsi, so it makes sense for it to stay there.
+; CHECK: addq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: adcq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw add i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_sub(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_sub:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent arrives in %rsi, so it makes sense for it to stay there.
+; CHECK: subq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: sbbq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw sub i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_min(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_min:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setbe [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setle [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw min i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_max(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_max:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setae [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setge [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw max i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umin(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umin:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setbe [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setbe [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw umin i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umax(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umax:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rax, %rsi
+; CHECK: setb [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: seta [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw umax i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define i128 @atomic_load_seq_cst(i128* %p) {
+; CHECK-LABEL: atomic_load_seq_cst:
+; CHECK: xorl %eax, %eax
+; CHECK: xorl %edx, %edx
+; CHECK: xorl %ebx, %ebx
+; CHECK: xorl %ecx, %ecx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %r = load atomic i128* %p seq_cst, align 16
+ ret i128 %r
+}
+
+define i128 @atomic_load_relaxed(i128* %p) {
+; CHECK-LABEL: atomic_load_relaxed:
+; CHECK: xorl %eax, %eax
+; CHECK: xorl %edx, %edx
+; CHECK: xorl %ebx, %ebx
+; CHECK: xorl %ecx, %ecx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %r = load atomic i128* %p monotonic, align 16
+ ret i128 %r
+}
+
+define void @atomic_store_seq_cst(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_seq_cst:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+; CHECK-NOT: callq ___sync_lock_test_and_set_16
+
+ store atomic i128 %in, i128* %p seq_cst, align 16
+ ret void
+}
+
+define void @atomic_store_release(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_release:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p release, align 16
+ ret void
+}
+
+define void @atomic_store_relaxed(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_relaxed:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p unordered, align 16
+ ret void
+}
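
Every atomicrmw case in this file checks the same shape: load the current 16-byte value, compute the update, and retry through cmpxchg16b until the exchange sticks. An IR-level sketch of that loop for the or case (illustrative only; the actual expansion is performed by the backend):

define void @rmw_or_sketch(i128* %p, i128 %bits) {
entry:
  %init = load atomic i128* %p monotonic, align 16
  br label %loop
loop:
  %old = phi i128 [ %init, %entry ], [ %prev, %loop ]
  %new = or i128 %old, %bits
  %pair = cmpxchg i128* %p, i128 %old, i128 %new seq_cst seq_cst
  %prev = extractvalue { i128, i1 } %pair, 0
  %ok = extractvalue { i128, i1 } %pair, 1
  br i1 %ok, label %done, label %loop
done:
  ret void
}
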
diff --git a/test/CodeGen/X86/atomic16.ll b/test/CodeGen/X86/atomic16.ll
index 45d3ff4..faaa4c4 100644
--- a/test/CodeGen/X86/atomic16.ll
+++ b/test/CodeGen/X86/atomic16.ll
@@ -4,8 +4,8 @@
@sc16 = external global i16
define void @atomic_fetch_add16() nounwind {
-; X64: atomic_fetch_add16
-; X32: atomic_fetch_add16
+; X64-LABEL: atomic_fetch_add16
+; X32-LABEL: atomic_fetch_add16
entry:
; 32-bit
%t1 = atomicrmw add i16* @sc16, i16 1 acquire
@@ -34,8 +34,8 @@ entry:
}
define void @atomic_fetch_sub16() nounwind {
-; X64: atomic_fetch_sub16
-; X32: atomic_fetch_sub16
+; X64-LABEL: atomic_fetch_sub16
+; X32-LABEL: atomic_fetch_sub16
%t1 = atomicrmw sub i16* @sc16, i16 1 acquire
; X64: lock
; X64: decw
@@ -62,18 +62,18 @@ define void @atomic_fetch_sub16() nounwind {
}
define void @atomic_fetch_and16() nounwind {
-; X64: atomic_fetch_and16
-; X32: atomic_fetch_and16
+; X64-LABEL: atomic_fetch_and16
+; X32-LABEL: atomic_fetch_and16
%t1 = atomicrmw and i16* @sc16, i16 3 acquire
; X64: lock
; X64: andw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: andw $3
%t2 = atomicrmw and i16* @sc16, i16 5 acquire
-; X64: andw
+; X64: andl
; X64: lock
; X64: cmpxchgw
-; X32: andw
+; X32: andl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
@@ -87,18 +87,18 @@ define void @atomic_fetch_and16() nounwind {
}
define void @atomic_fetch_or16() nounwind {
-; X64: atomic_fetch_or16
-; X32: atomic_fetch_or16
+; X64-LABEL: atomic_fetch_or16
+; X32-LABEL: atomic_fetch_or16
%t1 = atomicrmw or i16* @sc16, i16 3 acquire
; X64: lock
; X64: orw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: orw $3
%t2 = atomicrmw or i16* @sc16, i16 5 acquire
-; X64: orw
+; X64: orl
; X64: lock
; X64: cmpxchgw
-; X32: orw
+; X32: orl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
@@ -112,18 +112,18 @@ define void @atomic_fetch_or16() nounwind {
}
define void @atomic_fetch_xor16() nounwind {
-; X64: atomic_fetch_xor16
-; X32: atomic_fetch_xor16
+; X64-LABEL: atomic_fetch_xor16
+; X32-LABEL: atomic_fetch_xor16
%t1 = atomicrmw xor i16* @sc16, i16 3 acquire
; X64: lock
; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: xorw $3
%t2 = atomicrmw xor i16* @sc16, i16 5 acquire
-; X64: xorw
+; X64: xorl
; X64: lock
; X64: cmpxchgw
-; X32: xorw
+; X32: xorl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
@@ -137,15 +137,15 @@ define void @atomic_fetch_xor16() nounwind {
}
define void @atomic_fetch_nand16(i16 %x) nounwind {
-; X64: atomic_fetch_nand16
-; X32: atomic_fetch_nand16
+; X64-LABEL: atomic_fetch_nand16
+; X32-LABEL: atomic_fetch_nand16
%t1 = atomicrmw nand i16* @sc16, i16 %x acquire
-; X64: andw
-; X64: notw
+; X64: andl
+; X64: notl
; X64: lock
; X64: cmpxchgw
-; X32: andw
-; X32: notw
+; X32: andl
+; X32: notl
; X32: lock
; X32: cmpxchgw
ret void
@@ -155,12 +155,16 @@ define void @atomic_fetch_nand16(i16 %x) nounwind {
define void @atomic_fetch_max16(i16 %x) nounwind {
%t1 = atomicrmw max i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movswl
+; X64: movswl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movswl
+; X32: movswl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -171,12 +175,16 @@ define void @atomic_fetch_max16(i16 %x) nounwind {
define void @atomic_fetch_min16(i16 %x) nounwind {
%t1 = atomicrmw min i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movswl
+; X64: movswl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movswl
+; X32: movswl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -187,12 +195,16 @@ define void @atomic_fetch_min16(i16 %x) nounwind {
define void @atomic_fetch_umax16(i16 %x) nounwind {
%t1 = atomicrmw umax i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movzwl
+; X64: movzwl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movzwl
+; X32: movzwl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -203,11 +215,16 @@ define void @atomic_fetch_umax16(i16 %x) nounwind {
define void @atomic_fetch_umin16(i16 %x) nounwind {
%t1 = atomicrmw umin i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movzwl
+; X64: movzwl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+
+; X32: movzwl
+; X32: movzwl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
diff --git a/test/CodeGen/X86/atomic32.ll b/test/CodeGen/X86/atomic32.ll
index 474c0e6..4f2cbe0 100644
--- a/test/CodeGen/X86/atomic32.ll
+++ b/test/CodeGen/X86/atomic32.ll
@@ -5,8 +5,8 @@
@sc32 = external global i32
define void @atomic_fetch_add32() nounwind {
-; X64: atomic_fetch_add32
-; X32: atomic_fetch_add32
+; X64-LABEL: atomic_fetch_add32:
+; X32-LABEL: atomic_fetch_add32:
entry:
; 32-bit
%t1 = atomicrmw add i32* @sc32, i32 1 acquire
@@ -35,8 +35,8 @@ entry:
}
define void @atomic_fetch_sub32() nounwind {
-; X64: atomic_fetch_sub32
-; X32: atomic_fetch_sub32
+; X64-LABEL: atomic_fetch_sub32:
+; X32-LABEL: atomic_fetch_sub32:
%t1 = atomicrmw sub i32* @sc32, i32 1 acquire
; X64: lock
; X64: decl
@@ -63,8 +63,8 @@ define void @atomic_fetch_sub32() nounwind {
}
define void @atomic_fetch_and32() nounwind {
-; X64: atomic_fetch_and32
-; X32: atomic_fetch_and32
+; X64-LABEL: atomic_fetch_and32:
+; X32-LABEL: atomic_fetch_and32:
%t1 = atomicrmw and i32* @sc32, i32 3 acquire
; X64: lock
; X64: andl $3
@@ -88,8 +88,8 @@ define void @atomic_fetch_and32() nounwind {
}
define void @atomic_fetch_or32() nounwind {
-; X64: atomic_fetch_or32
-; X32: atomic_fetch_or32
+; X64-LABEL: atomic_fetch_or32:
+; X32-LABEL: atomic_fetch_or32:
%t1 = atomicrmw or i32* @sc32, i32 3 acquire
; X64: lock
; X64: orl $3
@@ -113,8 +113,8 @@ define void @atomic_fetch_or32() nounwind {
}
define void @atomic_fetch_xor32() nounwind {
-; X64: atomic_fetch_xor32
-; X32: atomic_fetch_xor32
+; X64-LABEL: atomic_fetch_xor32:
+; X32-LABEL: atomic_fetch_xor32:
%t1 = atomicrmw xor i32* @sc32, i32 3 acquire
; X64: lock
; X64: xorl $3
@@ -138,8 +138,8 @@ define void @atomic_fetch_xor32() nounwind {
}
define void @atomic_fetch_nand32(i32 %x) nounwind {
-; X64: atomic_fetch_nand32
-; X32: atomic_fetch_nand32
+; X64-LABEL: atomic_fetch_nand32:
+; X32-LABEL: atomic_fetch_nand32:
%t1 = atomicrmw nand i32* @sc32, i32 %x acquire
; X64: andl
; X64: notl
@@ -155,19 +155,22 @@ define void @atomic_fetch_nand32(i32 %x) nounwind {
}
define void @atomic_fetch_max32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_max32:
+; X32-LABEL: atomic_fetch_max32:
+
%t1 = atomicrmw max i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jl
+; NOCMOV: subl
+; NOCMOV: jge
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -177,19 +180,23 @@ define void @atomic_fetch_max32(i32 %x) nounwind {
}
define void @atomic_fetch_min32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_min32:
+; X32-LABEL: atomic_fetch_min32:
+; NOCMOV-LABEL: atomic_fetch_min32:
+
%t1 = atomicrmw min i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jg
+; NOCMOV: subl
+; NOCMOV: jle
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -199,19 +206,23 @@ define void @atomic_fetch_min32(i32 %x) nounwind {
}
define void @atomic_fetch_umax32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax32:
+; X32-LABEL: atomic_fetch_umax32:
+; NOCMOV-LABEL: atomic_fetch_umax32:
+
%t1 = atomicrmw umax i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jb
+; NOCMOV: subl
+; NOCMOV: ja
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -221,19 +232,23 @@ define void @atomic_fetch_umax32(i32 %x) nounwind {
}
define void @atomic_fetch_umin32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin32:
+; X32-LABEL: atomic_fetch_umin32:
+; NOCMOV-LABEL: atomic_fetch_umin32:
+
%t1 = atomicrmw umin i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: ja
+; NOCMOV: subl
+; NOCMOV: jb
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -243,6 +258,9 @@ define void @atomic_fetch_umin32(i32 %x) nounwind {
}
define void @atomic_fetch_cmpxchg32() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg32:
+; X32-LABEL: atomic_fetch_cmpxchg32:
+
%t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire acquire
; X64: lock
; X64: cmpxchgl
@@ -254,6 +272,9 @@ define void @atomic_fetch_cmpxchg32() nounwind {
}
define void @atomic_fetch_store32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_store32:
+; X32-LABEL: atomic_fetch_store32:
+
store atomic i32 %x, i32* @sc32 release, align 4
; X64-NOT: lock
; X64: movl
@@ -265,6 +286,9 @@ define void @atomic_fetch_store32(i32 %x) nounwind {
}
define void @atomic_fetch_swap32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap32:
+; X32-LABEL: atomic_fetch_swap32:
+
%t1 = atomicrmw xchg i32* @sc32, i32 %x acquire
; X64-NOT: lock
; X64: xchgl
diff --git a/test/CodeGen/X86/atomic64.ll b/test/CodeGen/X86/atomic64.ll
index 4f55edc..11b4e68 100644
--- a/test/CodeGen/X86/atomic64.ll
+++ b/test/CodeGen/X86/atomic64.ll
@@ -3,7 +3,8 @@
@sc64 = external global i64
define void @atomic_fetch_add64() nounwind {
-; X64: atomic_fetch_add64
+; X64-LABEL: atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
entry:
%t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X64: lock
@@ -22,7 +23,8 @@ entry:
}
define void @atomic_fetch_sub64() nounwind {
-; X64: atomic_fetch_sub64
+; X64-LABEL: atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
%t1 = atomicrmw sub i64* @sc64, i64 1 acquire
; X64: lock
; X64: decq
@@ -40,7 +42,8 @@ define void @atomic_fetch_sub64() nounwind {
}
define void @atomic_fetch_and64() nounwind {
-; X64: atomic_fetch_and64
+; X64-LABEL: atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
%t1 = atomicrmw and i64* @sc64, i64 3 acquire
; X64: lock
; X64: andq $3
@@ -56,7 +59,8 @@ define void @atomic_fetch_and64() nounwind {
}
define void @atomic_fetch_or64() nounwind {
-; X64: atomic_fetch_or64
+; X64-LABEL: atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
%t1 = atomicrmw or i64* @sc64, i64 3 acquire
; X64: lock
; X64: orq $3
@@ -72,7 +76,8 @@ define void @atomic_fetch_or64() nounwind {
}
define void @atomic_fetch_xor64() nounwind {
-; X64: atomic_fetch_xor64
+; X64-LABEL: atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
%t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X64: lock
; X64: xorq $3
@@ -88,8 +93,8 @@ define void @atomic_fetch_xor64() nounwind {
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X64: atomic_fetch_nand64
-; X32: atomic_fetch_nand64
+; X64-LABEL: atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
%t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X64: andq
; X64: notq
@@ -107,8 +112,10 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
}
define void @atomic_fetch_max64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
%t1 = atomicrmw max i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -126,8 +133,10 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
}
define void @atomic_fetch_min64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
%t1 = atomicrmw min i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -145,8 +154,10 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
%t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -164,8 +175,10 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
%t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -183,6 +196,8 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
%t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X64: lock
; X64: cmpxchgq
@@ -194,6 +209,8 @@ define void @atomic_fetch_cmpxchg64() nounwind {
}
define void @atomic_fetch_store64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
store atomic i64 %x, i64* @sc64 release, align 8
; X64-NOT: lock
; X64: movq
@@ -205,6 +222,8 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
%t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X64-NOT: lock
; X64: xchgq
diff --git a/test/CodeGen/X86/atomic6432.ll b/test/CodeGen/X86/atomic6432.ll
index c0f7267..1c4b0f4 100644
--- a/test/CodeGen/X86/atomic6432.ll
+++ b/test/CodeGen/X86/atomic6432.ll
@@ -3,7 +3,8 @@
@sc64 = external global i64
define void @atomic_fetch_add64() nounwind {
-; X32: atomic_fetch_add64
+; X64-LABEL: atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
entry:
%t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X32: addl
@@ -30,20 +31,21 @@ entry:
}
define void @atomic_fetch_sub64() nounwind {
-; X32: atomic_fetch_sub64
+; X64-LABEL: atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
%t1 = atomicrmw sub i64* @sc64, i64 1 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-1
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t2 = atomicrmw sub i64* @sc64, i64 3 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-3
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw sub i64* @sc64, i64 5 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-5
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire
@@ -56,15 +58,16 @@ define void @atomic_fetch_sub64() nounwind {
}
define void @atomic_fetch_and64() nounwind {
-; X32: atomic_fetch_and64
+; X64-LABEL: atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
%t1 = atomicrmw and i64* @sc64, i64 3 acquire
-; X32: andl
-; X32: andl
+; X32: andl $3
+; X32-NOT: andl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw and i64* @sc64, i64 5 acquire
-; X32: andl
-; X32: andl
+ %t2 = atomicrmw and i64* @sc64, i64 4294967297 acquire
+; X32: andl $1
+; X32: andl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw and i64* @sc64, i64 %t2 acquire
@@ -77,15 +80,16 @@ define void @atomic_fetch_and64() nounwind {
}
define void @atomic_fetch_or64() nounwind {
-; X32: atomic_fetch_or64
+; X64-LABEL: atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
%t1 = atomicrmw or i64* @sc64, i64 3 acquire
-; X32: orl
-; X32: orl
+; X32: orl $3
+; X32-NOT: orl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw or i64* @sc64, i64 5 acquire
-; X32: orl
-; X32: orl
+ %t2 = atomicrmw or i64* @sc64, i64 4294967297 acquire
+; X32: orl $1
+; X32: orl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw or i64* @sc64, i64 %t2 acquire
@@ -98,15 +102,16 @@ define void @atomic_fetch_or64() nounwind {
}
define void @atomic_fetch_xor64() nounwind {
-; X32: atomic_fetch_xor64
+; X64-LABEL: atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
%t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X32: xorl
-; X32: xorl
+; X32-NOT: xorl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw xor i64* @sc64, i64 5 acquire
-; X32: xorl
-; X32: xorl
+ %t2 = atomicrmw xor i64* @sc64, i64 4294967297 acquire
+; X32: xorl $1
+; X32: xorl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire
@@ -119,7 +124,8 @@ define void @atomic_fetch_xor64() nounwind {
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X32: atomic_fetch_nand64
+; X64-LABEL: atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
%t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X32: andl
; X32: andl
@@ -132,10 +138,11 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
}
define void @atomic_fetch_max64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
%t1 = atomicrmw max i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -145,10 +152,11 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
}
define void @atomic_fetch_min64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
%t1 = atomicrmw min i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -158,10 +166,11 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
%t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -171,10 +180,11 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
%t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -184,6 +194,8 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
%t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X32: lock
; X32: cmpxchg8b
@@ -192,6 +204,8 @@ define void @atomic_fetch_cmpxchg64() nounwind {
}
define void @atomic_fetch_store64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
store atomic i64 %x, i64* @sc64 release, align 8
; X32: lock
; X32: cmpxchg8b
@@ -200,6 +214,8 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
%t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X32: lock
; X32: xchg8b
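
The rewritten atomic_fetch_sub64 checks near the top of this file fold each subtraction into an add of the negated 64-bit constant; working the immediates by hand (not spelled out in the patch):

  -1  ->  low 0xFFFFFFFF, high 0xFFFFFFFF  ->  addl $-1, adcl $-1
  -3  ->  low 0xFFFFFFFD, high 0xFFFFFFFF  ->  addl $-3, adcl $-1
  -5  ->  low 0xFFFFFFFB, high 0xFFFFFFFF  ->  addl $-5, adcl $-1
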
diff --git a/test/CodeGen/X86/atomic8.ll b/test/CodeGen/X86/atomic8.ll
index 203b26f..5eef9b2 100644
--- a/test/CodeGen/X86/atomic8.ll
+++ b/test/CodeGen/X86/atomic8.ll
@@ -4,8 +4,8 @@
@sc8 = external global i8
define void @atomic_fetch_add8() nounwind {
-; X64: atomic_fetch_add8
-; X32: atomic_fetch_add8
+; X64-LABEL: atomic_fetch_add8:
+; X32-LABEL: atomic_fetch_add8:
entry:
; 32-bit
%t1 = atomicrmw add i8* @sc8, i8 1 acquire
@@ -34,8 +34,8 @@ entry:
}
define void @atomic_fetch_sub8() nounwind {
-; X64: atomic_fetch_sub8
-; X32: atomic_fetch_sub8
+; X64-LABEL: atomic_fetch_sub8:
+; X32-LABEL: atomic_fetch_sub8:
%t1 = atomicrmw sub i8* @sc8, i8 1 acquire
; X64: lock
; X64: decb
@@ -62,8 +62,8 @@ define void @atomic_fetch_sub8() nounwind {
}
define void @atomic_fetch_and8() nounwind {
-; X64: atomic_fetch_and8
-; X32: atomic_fetch_and8
+; X64-LABEL: atomic_fetch_and8:
+; X32-LABEL: atomic_fetch_and8:
%t1 = atomicrmw and i8* @sc8, i8 3 acquire
; X64: lock
; X64: andb $3
@@ -87,8 +87,8 @@ define void @atomic_fetch_and8() nounwind {
}
define void @atomic_fetch_or8() nounwind {
-; X64: atomic_fetch_or8
-; X32: atomic_fetch_or8
+; X64-LABEL: atomic_fetch_or8:
+; X32-LABEL: atomic_fetch_or8:
%t1 = atomicrmw or i8* @sc8, i8 3 acquire
; X64: lock
; X64: orb $3
@@ -112,8 +112,8 @@ define void @atomic_fetch_or8() nounwind {
}
define void @atomic_fetch_xor8() nounwind {
-; X64: atomic_fetch_xor8
-; X32: atomic_fetch_xor8
+; X64-LABEL: atomic_fetch_xor8:
+; X32-LABEL: atomic_fetch_xor8:
%t1 = atomicrmw xor i8* @sc8, i8 3 acquire
; X64: lock
; X64: xorb $3
@@ -137,8 +137,8 @@ define void @atomic_fetch_xor8() nounwind {
}
define void @atomic_fetch_nand8(i8 %x) nounwind {
-; X64: atomic_fetch_nand8
-; X32: atomic_fetch_nand8
+; X64-LABEL: atomic_fetch_nand8:
+; X32-LABEL: atomic_fetch_nand8:
%t1 = atomicrmw nand i8* @sc8, i8 %x acquire
; X64: andb
; X64: notb
@@ -154,14 +154,18 @@ define void @atomic_fetch_nand8(i8 %x) nounwind {
}
define void @atomic_fetch_max8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_max8:
+; X32-LABEL: atomic_fetch_max8:
%t1 = atomicrmw max i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movsbl
+; X64: movsbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movsbl
+; X32: movsbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -170,14 +174,18 @@ define void @atomic_fetch_max8(i8 %x) nounwind {
}
define void @atomic_fetch_min8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_min8:
+; X32-LABEL: atomic_fetch_min8:
%t1 = atomicrmw min i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movsbl
+; X64: movsbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movsbl
+; X32: movsbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -186,14 +194,18 @@ define void @atomic_fetch_min8(i8 %x) nounwind {
}
define void @atomic_fetch_umax8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax8:
+; X32-LABEL: atomic_fetch_umax8:
%t1 = atomicrmw umax i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movzbl
+; X64: movzbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movzbl
+; X32: movzbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -202,13 +214,18 @@ define void @atomic_fetch_umax8(i8 %x) nounwind {
}
define void @atomic_fetch_umin8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin8:
+; X32-LABEL: atomic_fetch_umin8:
%t1 = atomicrmw umin i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movzbl
+; X64: movzbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+
+; X32: movzbl
+; X32: movzbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -217,6 +234,8 @@ define void @atomic_fetch_umin8(i8 %x) nounwind {
}
define void @atomic_fetch_cmpxchg8() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg8:
+; X32-LABEL: atomic_fetch_cmpxchg8:
%t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire acquire
; X64: lock
; X64: cmpxchgb
@@ -228,6 +247,8 @@ define void @atomic_fetch_cmpxchg8() nounwind {
}
define void @atomic_fetch_store8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_store8:
+; X32-LABEL: atomic_fetch_store8:
store atomic i8 %x, i8* @sc8 release, align 4
; X64-NOT: lock
; X64: movb
@@ -239,6 +260,8 @@ define void @atomic_fetch_store8(i8 %x) nounwind {
}
define void @atomic_fetch_swap8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap8:
+; X32-LABEL: atomic_fetch_swap8:
%t1 = atomicrmw xchg i8* @sc8, i8 %x acquire
; X64-NOT: lock
; X64: xchgb
diff --git a/test/CodeGen/X86/atomic_op.ll b/test/CodeGen/X86/atomic_op.ll
index b3045ed..d0ab28a 100644
--- a/test/CodeGen/X86/atomic_op.ll
+++ b/test/CodeGen/X86/atomic_op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov,cx16 -verify-machineinstrs | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -101,26 +101,28 @@ entry:
%neg1 = sub i32 0, 10 ; <i32> [#uses=1]
; CHECK: lock
; CHECK: cmpxchgl
- %16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic monotonic
+ %pair16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic monotonic
+ %16 = extractvalue { i32, i1 } %pair16, 0
store i32 %16, i32* %old
; CHECK: lock
; CHECK: cmpxchgl
- %17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic monotonic
+ %pair17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic monotonic
+ %17 = extractvalue { i32, i1 } %pair17, 0
store i32 %17, i32* %old
; CHECK: movl [[R17atomic:.*]], %eax
- ; CHECK: movl $1401, %[[R17mask:[a-z]*]]
- ; CHECK: andl %eax, %[[R17mask]]
- ; CHECK: notl %[[R17mask]]
+ ; CHECK: movl %eax, %[[R17mask:[a-z]*]]
+ ; CHECK: notl %[[R17mask]]
+ ; CHECK: orl $-1402, %[[R17mask]]
; CHECK: lock
; CHECK: cmpxchgl %[[R17mask]], [[R17atomic]]
; CHECK: jne
; CHECK: movl %eax,
%18 = atomicrmw nand i32* %val2, i32 1401 monotonic
store i32 %18, i32* %old
- ; CHECK: andl
- ; CHECK: andl
; CHECK: notl
; CHECK: notl
+ ; CHECK: orl $252645135
+ ; CHECK: orl $252645135
; CHECK: lock
; CHECK: cmpxchg8b
%19 = atomicrmw nand i64* %temp64, i64 17361641481138401520 monotonic
@@ -133,6 +135,7 @@ entry:
; CHECK: lock
; CHECK: cmpxchgl %{{.*}}, %gs:(%{{.*}})
- %0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic monotonic
+ %pair0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic monotonic
+ %0 = extractvalue { i32, i1 } %pair0, 0
ret void
}
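
The reshuffled nand checks above are De Morgan's law at work: ~(x & C) == ~x | ~C, so the old and-then-not sequence becomes not-then-or with a complemented constant. Working the constants by hand (not spelled out in the patch):

  ~1401                 ==  -1402                ->  notl, then orl $-1402
  ~0xF0F0F0F0F0F0F0F0   ==  0x0F0F0F0F0F0F0F0F   ->  orl $252645135 on each 32-bit half
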
diff --git a/test/CodeGen/X86/avx-blend.ll b/test/CodeGen/X86/avx-blend.ll
index e21c7a0..d2a22d7 100644
--- a/test/CodeGen/X86/avx-blend.ll
+++ b/test/CodeGen/X86/avx-blend.ll
@@ -110,7 +110,7 @@ define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
;CHECK-LABEL: vsel_double4:
;CHECK-NOT: vinsertf128
-;CHECK: vshufpd $10
+;CHECK: vblendpd $10
;CHECK-NEXT: ret
define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
%vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %v1, <4 x double> %v2
@@ -158,3 +158,45 @@ define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd)
declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>)
declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>)
+
+;; 4 tests for shufflevectors that optimize to blend + immediate
+; CHECK-LABEL: @blend_shufflevector_4xfloat
+define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
+; The equivalent select mask is <i1 true, i1 false, i1 true, i1 false>.
+; Its big-endian representation is 0101 = 5.
+; '1' means take the first argument, '0' means take the second argument.
+; This is the opposite of the Intel syntax, so we expect the
+; inverted mask: 1010 = 10.
+; According to the ABI:
+; a is in xmm0 => first argument is xmm0.
+; b is in xmm1 => second argument is xmm1.
+; Result is in xmm0 => destination argument.
+; CHECK: vblendps $10, %xmm1, %xmm0, %xmm0
+; CHECK: ret
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_8xfloat
+define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b) {
+; CHECK: vblendps $190, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 6, i32 15>
+ ret <8 x float> %1
+}
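+; The immediate follows the same scheme as above (illustrative): elements
+; 1-5 and 7 are taken from %b (indices 9-13 and 15), so the inverted mask
+; is 10111110 = 190.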
+
+; CHECK-LABEL: @blend_shufflevector_4xdouble
+define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double> %b) {
+; CHECK: vblendpd $2, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ ret <4 x double> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_4xi64
+define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK: vblendpd $13, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ ret <4 x i64> %1
+}
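+; Same derivation for the two tests above: only element 1 of the v4f64 comes
+; from %b, giving $2, while elements 0, 2 and 3 of the v4i64 do, giving
+; 1101 = 13. The i64 case still uses the floating-point vblendpd because
+; AVX1 provides no 256-bit integer blend.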
diff --git a/test/CodeGen/X86/avx-intel-ocl.ll b/test/CodeGen/X86/avx-intel-ocl.ll
index 7337815..3e051bf 100644
--- a/test/CodeGen/X86/avx-intel-ocl.ll
+++ b/test/CodeGen/X86/avx-intel-ocl.ll
@@ -7,21 +7,21 @@ declare <16 x float> @func_float16_ptr(<16 x float>, <16 x float> *)
declare <16 x float> @func_float16(<16 x float>, <16 x float>)
declare i32 @func_int(i32, i32)
-; WIN64: testf16_inp
+; WIN64-LABEL: testf16_inp
; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
; WIN64: leaq {{.*}}(%rsp), %rcx
; WIN64: call
; WIN64: ret
-; X32: testf16_inp
+; X32-LABEL: testf16_inp
; X32: movl %eax, (%esp)
; X32: vaddps {{.*}}, {{%ymm[0-1]}}
; X32: vaddps {{.*}}, {{%ymm[0-1]}}
; X32: call
; X32: ret
-; X64: testf16_inp
+; X64-LABEL: testf16_inp
; X64: vaddps {{.*}}, {{%ymm[0-1]}}
; X64: vaddps {{.*}}, {{%ymm[0-1]}}
; X64: leaq {{.*}}(%rsp), %rdi
@@ -41,14 +41,14 @@ define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
;test calling conventions - preserved registers
; preserved ymm6-ymm15
-; WIN64: testf16_regs
+; WIN64-LABEL: testf16_regs
; WIN64: call
; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; WIN64: ret
; preserved ymm8-ymm15
-; X64: testf16_regs
+; X64-LABEL: testf16_regs
; X64: call
; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
@@ -65,28 +65,30 @@ define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
}
; test calling conventions - prolog and epilog
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
+; WIN64-LABEL: test_prolog_epilog
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
; WIN64: call
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+
+; X64-LABEL: test_prolog_epilog
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
@@ -111,12 +113,14 @@ define intel_ocl_bicc <16 x float> @test_prolog_epilog(<16 x float> %a, <16 x fl
; test functions with integer parameters
; pass parameters on stack for 32-bit platform
+; X32-LABEL: test_int
; X32: movl {{.*}}, 4(%esp)
; X32: movl {{.*}}, (%esp)
; X32: call
; X32: addl {{.*}}, %eax
; pass parameters in registers for 64-bit platform
+; X64-LABEL: test_int
; X64: leal {{.*}}, %edi
; X64: movl {{.*}}, %esi
; X64: call
@@ -128,21 +132,21 @@ define i32 @test_int(i32 %a, i32 %b) nounwind {
ret i32 %c
}
-; WIN64: test_float4
+; WIN64-LABEL: test_float4
; WIN64-NOT: vzeroupper
; WIN64: call
; WIN64-NOT: vzeroupper
; WIN64: call
; WIN64: ret
-; X64: test_float4
+; X64-LABEL: test_float4
; X64-NOT: vzeroupper
; X64: call
; X64-NOT: vzeroupper
; X64: call
; X64: ret
-; X32: test_float4
+; X32-LABEL: test_float4
; X32: vzeroupper
; X32: call
; X32: vzeroupper
diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index 0be83f6..ce31161 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -2219,14 +2219,6 @@ define void @test_x86_avx_storeu_ps_256(i8* %a0, <8 x float> %a1) {
declare void @llvm.x86.avx.storeu.ps.256(i8*, <8 x float>) nounwind
-define <4 x double> @test_x86_avx_vbroadcast_sd_256(i8* %a0) {
- ; CHECK: vbroadcastsd
- %res = call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %a0) ; <<4 x double>> [#uses=1]
- ret <4 x double> %res
-}
-declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*) nounwind readonly
-
-
define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
; CHECK: vbroadcastf128
%res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
@@ -2243,22 +2235,6 @@ define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
-define <4 x float> @test_x86_avx_vbroadcast_ss(i8* %a0) {
- ; CHECK: vbroadcastss
- %res = call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*) nounwind readonly
-
-
-define <8 x float> @test_x86_avx_vbroadcast_ss_256(i8* %a0) {
- ; CHECK: vbroadcastss
- %res = call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %a0) ; <<8 x float>> [#uses=1]
- ret <8 x float> %res
-}
-declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*) nounwind readonly
-
-
define <2 x double> @test_x86_avx_vextractf128_pd_256(<4 x double> %a0) {
; CHECK: vextractf128
%res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 7) ; <<2 x double>> [#uses=1]
diff --git a/test/CodeGen/X86/avx-shuffle.ll b/test/CodeGen/X86/avx-shuffle.ll
index f407ba4..4a996d7 100644
--- a/test/CodeGen/X86/avx-shuffle.ll
+++ b/test/CodeGen/X86/avx-shuffle.ll
@@ -5,8 +5,10 @@ define <4 x float> @test1(<4 x float> %a) nounwind {
%b = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 2, i32 5, i32 undef, i32 undef>
ret <4 x float> %b
; CHECK-LABEL: test1:
-; CHECK: vshufps
-; CHECK: vpshufd
+;; TODO: This test could be improved by removing the xor instruction and
+;; having vinsertps zero out the needed elements.
+; CHECK: vxorps
+; CHECK: vinsertps
}
; rdar://10538417
@@ -23,7 +25,7 @@ define <4 x i64> @test3(<4 x i64> %a, <4 x i64> %b) nounwind {
%c = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 undef>
ret <4 x i64> %c
; CHECK-LABEL: test3:
-; CHECK: vperm2f128
+; CHECK: vblendpd
; CHECK: ret
}
diff --git a/test/CodeGen/X86/avx-splat.ll b/test/CodeGen/X86/avx-splat.ll
index 5d07815..b1b2f8b 100644
--- a/test/CodeGen/X86/avx-splat.ll
+++ b/test/CodeGen/X86/avx-splat.ll
@@ -43,13 +43,10 @@ entry:
ret <4 x double> %vecinit6.i
}
-; Test this simple opt:
+; Test that this turns into a broadcast:
; shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
-; To:
-; shuffle (vload ptr)), undef, <1, 1, 1, 1>
-; CHECK: vmovdqa
-; CHECK-NEXT: vpshufd $-1
-; CHECK-NEXT: vinsertf128 $1
+;
+; CHECK: vbroadcastss
define <8 x float> @funcE() nounwind {
allocas:
%udx495 = alloca [18 x [18 x float]], align 32
diff --git a/test/CodeGen/X86/avx-vperm2f128.ll b/test/CodeGen/X86/avx-vperm2f128.ll
index caa21e5..c20775b 100644
--- a/test/CodeGen/X86/avx-vperm2f128.ll
+++ b/test/CodeGen/X86/avx-vperm2f128.ll
@@ -9,7 +9,7 @@ entry:
}
; CHECK: _B
-; CHECK: vperm2f128 $48
+; CHECK: vblendps $240
define <8 x float> @B(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
diff --git a/test/CodeGen/X86/avx-vshufp.ll b/test/CodeGen/X86/avx-vshufp.ll
index 45883b7..ad3dbc1 100644
--- a/test/CodeGen/X86/avx-vshufp.ll
+++ b/test/CodeGen/X86/avx-vshufp.ll
@@ -32,14 +32,14 @@ entry:
ret <8 x i32> %shuffle
}
-; CHECK: vshufpd $10, %ymm
+; CHECK: vblendpd $10, %ymm
define <4 x double> @B(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret <4 x double> %shuffle
}
-; CHECK: vshufpd $10, (%{{.*}}), %ymm
+; CHECK: vblendpd $10, (%{{.*}}), %ymm
define <4 x double> @B2(<4 x double>* %a, <4 x double>* %b) nounwind uwtable readnone ssp {
entry:
%a2 = load <4 x double>* %a
@@ -48,14 +48,14 @@ entry:
ret <4 x double> %shuffle
}
-; CHECK: vshufpd $10, %ymm
+; CHECK: vblendpd $10, %ymm
define <4 x i64> @B3(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret <4 x i64> %shuffle
}
-; CHECK: vshufpd $10, (%{{.*}}), %ymm
+; CHECK: vblendpd $10, (%{{.*}}), %ymm
define <4 x i64> @B4(<4 x i64>* %a, <4 x i64>* %b) nounwind uwtable readnone ssp {
entry:
%a2 = load <4 x i64>* %a
@@ -71,7 +71,7 @@ entry:
ret <8 x float> %shuffle
}
-; CHECK: vshufpd $2, %ymm
+; CHECK: vblendpd $2, %ymm
define <4 x double> @D(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 undef>
diff --git a/test/CodeGen/X86/avx2-shuffle.ll b/test/CodeGen/X86/avx2-shuffle.ll
index 0e6dd29..185b989 100644
--- a/test/CodeGen/X86/avx2-shuffle.ll
+++ b/test/CodeGen/X86/avx2-shuffle.ll
@@ -60,6 +60,24 @@ define <4 x i64> @blend_test4(<4 x i64> %a, <4 x i64> %b) nounwind alwaysinline
ret <4 x i64> %t
}
+;; 2 tests for shufflevectors that optimize to blend + immediate
+; CHECK-LABEL: @blend_test5
+; CHECK: vpblendd $10, %xmm1, %xmm0, %xmm0
+; CHECK: ret
+define <4 x i32> @blend_test5(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i32> %1
+}
+
+; CHECK-LABEL: @blend_test6
+; CHECK: vpblendw $134, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+define <16 x i16> @blend_test6(<16 x i16> %a, <16 x i16> %b) {
+ %1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 23,
+ i32 8, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 31>
+ ret <16 x i16> %1
+}
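+; Worked mask for @blend_test6 (illustrative): vpblendw applies its 8-bit
+; immediate to each 128-bit lane separately; in both lanes the words at
+; positions 1, 2 and 7 come from %ymm1, so the immediate is 10000110 = 134.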
+
; CHECK: vpshufhw $27, %ymm
define <16 x i16> @vpshufhw(<16 x i16> %src1) nounwind uwtable readnone ssp {
entry:
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index 2476ea1..f5cda96 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -192,6 +192,14 @@ define <16 x double> @uitof64(<16 x i32> %a) nounwind {
ret <16 x double> %b
}
+; CHECK-LABEL: uitof64_256
+; CHECK: vcvtudq2pd
+; CHECK: ret
+define <4 x double> @uitof64_256(<4 x i32> %a) nounwind {
+ %b = uitofp <4 x i32> %a to <4 x double>
+ ret <4 x double> %b
+}
+
; CHECK-LABEL: uitof32
; CHECK: vcvtudq2ps
; CHECK: ret
diff --git a/test/CodeGen/X86/avx512-inc-dec.ll b/test/CodeGen/X86/avx512-inc-dec.ll
new file mode 100644
index 0000000..f04ca87
--- /dev/null
+++ b/test/CodeGen/X86/avx512-inc-dec.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+;CHECK-LABEL: test
+;CHECK-NOT: dec
+;CHECK-NOT: inc
+;CHECK: ret
+define i32 @test(i32 %a, i32 %b) {
+ %a1 = add i32 %a, -1
+ %b1 = add i32 %b, 1
+ %res = mul i32 %a1, %b1
+ ret i32 %res
+}
+
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index e19841a..18cfcfe 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -311,7 +311,6 @@ define <8 x i64> @test_conflict_q(<8 x i64> %a) {
declare <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
-
define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
; CHECK: vpconflictd
%res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 %mask)
@@ -324,6 +323,57 @@ define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
ret <8 x i64> %res
}
+define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
+ ; CHECK: movw $-1, %ax
+ ; CHECK: vpxor
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
+
+define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
+ ; CHECK: movb $-1, %al
+ ; CHECK: vpxor
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
+
+
+define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <16 x i32> @test_ctlz_d(<16 x i32> %a) {
+ ; CHECK-LABEL: test_ctlz_d
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1) nounwind readonly
+
+define <8 x i64> @test_ctlz_q(<8 x i64> %a) {
+ ; CHECK-LABEL: test_ctlz_q
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) nounwind readonly
+
define <16 x float> @test_x86_mask_blend_ps_512(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK: vblendmps
%res = call <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float> %a1, <16 x float> %a2, i16 %a0) ; <<16 x float>> [#uses=1]
@@ -544,4 +594,20 @@ define <16 x float> @test_vpermt2ps(<16 x float>%x, <16 x float>%y, <16 x i32>%p
ret <16 x float> %res
}
+define <16 x float> @test_vpermt2ps_mask(<16 x float>%x, <16 x float>%y, <16 x i32>%perm, i16 %mask) {
+; CHECK-LABEL: test_vpermt2ps_mask:
+; CHECK: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x7f,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>%perm, <16 x float>%x, <16 x float>%y, i16 %mask)
+ ret <16 x float> %res
+}
+
declare <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>, <16 x float>, <16 x float>, i16)
+
+define <8 x i64> @test_vmovntdqa(i8 *%x) {
+; CHECK-LABEL: test_vmovntdqa:
+; CHECK: vmovntdqa (%rdi), %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x2a,0x07]
+ %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %x)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*)
diff --git a/test/CodeGen/X86/avx512-nontemporal.ll b/test/CodeGen/X86/avx512-nontemporal.ll
new file mode 100644
index 0000000..ef50cdb
--- /dev/null
+++ b/test/CodeGen/X86/avx512-nontemporal.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=x86-64 -mattr=+avx512f | FileCheck %s
+
+define void @f(<16 x float> %A, <16 x float> %AA, i8* %B, <8 x double> %C, <8 x double> %CC, i32 %D, <8 x i64> %E, <8 x i64> %EE) {
+; CHECK: vmovntps %z
+ %cast = bitcast i8* %B to <16 x float>*
+ %A2 = fadd <16 x float> %A, %AA
+ store <16 x float> %A2, <16 x float>* %cast, align 64, !nontemporal !0
+; CHECK: vmovntdq %z
+ %cast1 = bitcast i8* %B to <8 x i64>*
+ %E2 = add <8 x i64> %E, %EE
+ store <8 x i64> %E2, <8 x i64>* %cast1, align 64, !nontemporal !0
+; CHECK: vmovntpd %z
+ %cast2 = bitcast i8* %B to <8 x double>*
+ %C2 = fadd <8 x double> %C, %CC
+ store <8 x double> %C2, <8 x double>* %cast2, align 64, !nontemporal !0
+ ret void
+}
+
+!0 = metadata !{i32 1}
diff --git a/test/CodeGen/X86/avx512-shuffle.ll b/test/CodeGen/X86/avx512-shuffle.ll
index 23ddc3a..b99e89a 100644
--- a/test/CodeGen/X86/avx512-shuffle.ll
+++ b/test/CodeGen/X86/avx512-shuffle.ll
@@ -56,6 +56,16 @@ define <8 x double> @test5(<8 x double> %a, <8 x double> %b) nounwind {
ret <8 x double> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test5m:
+; CHECK: vpermt2pd {{.* {%k[1-7]} {z}}}
+define <8 x double> @test5m(<8 x double> %a, <8 x double> %b, i8 %mask) nounwind {
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x double> %c, <8 x double> zeroinitializer
+ ret <8 x double> %res
+}
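+; In this and the following writemask tests, the bitcast of the scalar mask
+; to <8 x i1> plus the select against zeroinitializer is the IR pattern that
+; the backend folds into the {z} zero-masked form checked above.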
+
; CHECK-LABEL: test6:
; CHECK: vpermq $30
; CHECK: ret
@@ -72,6 +82,27 @@ define <8 x i64> @test7(<8 x i64> %a, <8 x i64> %b) nounwind {
ret <8 x i64> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test7m:
+; CHECK: vpermt2q {{.* {%k[1-7]} {z}}}
+define <8 x i64> @test7m(<8 x i64> %a, <8 x i64> %b, i8 %mask) nounwind {
+ %c = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x i64> %c, <8 x i64> zeroinitializer
+ ret <8 x i64> %res
+}
+
+; The mem variant of vpermt2 with a writemask
+; CHECK-LABEL: test7mm:
+; CHECK: vpermt2q {{\(.*\).* {%k[1-7]} {z}}}
+define <8 x i64> @test7mm(<8 x i64> %a, <8 x i64> *%pb, i8 %mask) nounwind {
+ %b = load <8 x i64>* %pb
+ %c = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x i64> %c, <8 x i64> zeroinitializer
+ ret <8 x i64> %res
+}
+
; CHECK-LABEL: test8:
; CHECK: vpermt2d
; CHECK: ret
@@ -80,6 +111,27 @@ define <16 x i32> @test8(<16 x i32> %a, <16 x i32> %b) nounwind {
ret <16 x i32> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test8m:
+; CHECK: vpermt2d {{.* {%k[1-7]} {z}}}
+define <16 x i32> @test8m(<16 x i32> %a, <16 x i32> %b, i16 %mask) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x i32> %c, <16 x i32> zeroinitializer
+ ret <16 x i32> %res
+}
+
+; The mem variant of vpermt2 with a writemask
+; CHECK-LABEL: test8mm:
+; CHECK: vpermt2d {{\(.*\).* {%k[1-7]} {z}}}
+define <16 x i32> @test8mm(<16 x i32> %a, <16 x i32> *%pb, i16 %mask) nounwind {
+ %b = load <16 x i32>* %pb
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x i32> %c, <16 x i32> zeroinitializer
+ ret <16 x i32> %res
+}
+
; CHECK-LABEL: test9:
; CHECK: vpermt2ps
; CHECK: ret
@@ -88,6 +140,16 @@ define <16 x float> @test9(<16 x float> %a, <16 x float> %b) nounwind {
ret <16 x float> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test9m:
+; CHECK: vpermt2ps {{.*}} {%k{{.}}} {z}
+define <16 x float> @test9m(<16 x float> %a, <16 x float> %b, i16 %mask) nounwind {
+ %c = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x float> %c, <16 x float> zeroinitializer
+ ret <16 x float> %res
+}
+
; CHECK-LABEL: test10:
; CHECK: vpermt2ps (
; CHECK: ret
diff --git a/test/CodeGen/X86/bswap-vector.ll b/test/CodeGen/X86/bswap-vector.ll
index 3c931db..9dc960d 100644
--- a/test/CodeGen/X86/bswap-vector.ll
+++ b/test/CodeGen/X86/bswap-vector.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -mcpu=x86-64 | FileCheck %s -check-prefix=CHECK-NOSSSE3
; RUN: llc < %s -mcpu=core2 | FileCheck %s -check-prefix=CHECK-SSSE3
; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK-AVX2
+; RUN: llc < %s -mcpu=core-avx2 -x86-experimental-vector-widening-legalization | FileCheck %s -check-prefix=CHECK-WIDE-AVX2
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -31,6 +32,10 @@ entry:
; CHECK-AVX2-LABEL: @test1
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test1
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
define <4 x i32> @test2(<4 x i32> %v) #0 {
@@ -52,6 +57,10 @@ entry:
; CHECK-AVX2-LABEL: @test2
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test2
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
define <2 x i64> @test3(<2 x i64> %v) #0 {
@@ -71,6 +80,10 @@ entry:
; CHECK-AVX2-LABEL: @test3
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test3
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
@@ -90,6 +103,10 @@ entry:
; CHECK-AVX2-LABEL: @test4
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test4
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
define <8 x i32> @test5(<8 x i32> %v) #0 {
@@ -105,6 +122,10 @@ entry:
; CHECK-AVX2-LABEL: @test5
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test5
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
define <4 x i64> @test6(<4 x i64> %v) #0 {
@@ -120,6 +141,10 @@ entry:
; CHECK-AVX2-LABEL: @test6
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test6
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
@@ -138,6 +163,10 @@ entry:
; CHECK-AVX2: vpshufb
; CHECK-AVX2: vpsrld $16
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test7
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
attributes #0 = { nounwind uwtable }
diff --git a/test/CodeGen/X86/cmp.ll b/test/CodeGen/X86/cmp.ll
index cdcdc96..149d537 100644
--- a/test/CodeGen/X86/cmp.ll
+++ b/test/CodeGen/X86/cmp.ll
@@ -198,3 +198,16 @@ define i32 @test14(i32 %mask, i32 %base, i32 %intra) #0 {
; CHECK: shrl $7, %edi
; CHECK-NEXT: cmovnsl %edx, %esi
}
+
+; PR19964
+define zeroext i1 @test15(i32 %bf.load, i32 %n) {
+ %bf.lshr = lshr i32 %bf.load, 16
+ %cmp2 = icmp eq i32 %bf.lshr, 0
+ %cmp5 = icmp uge i32 %bf.lshr, %n
+ %.cmp5 = or i1 %cmp2, %cmp5
+ ret i1 %.cmp5
+
+; CHECK-LABEL: test15:
+; CHECK: shrl $16, %edi
+; CHECK: cmpl %esi, %edi
+}
diff --git a/test/CodeGen/X86/cmpxchg-i1.ll b/test/CodeGen/X86/cmpxchg-i1.ll
new file mode 100644
index 0000000..a21ab59
--- /dev/null
+++ b/test/CodeGen/X86/cmpxchg-i1.ll
@@ -0,0 +1,87 @@
+; RUN: llc -mtriple=x86_64 -o - %s | FileCheck %s
+
+define i1 @try_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: try_cmpxchg:
+; CHECK: cmpxchgl
+; CHECK-NOT: cmp
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ ret i1 %success
+}
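+; Note on the pattern above (illustrative): the i1 success value extracted
+; from the {i32, i1} cmpxchg result maps directly onto the ZF that cmpxchgl
+; already sets, so sete can consume it without a second comparison, which is
+; what the CHECK-NOT guards.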
+
+define void @cmpxchg_flow(i64* %addr, i64 %desired, i64 %new) {
+; CHECK-LABEL: cmpxchg_flow:
+; CHECK: cmpxchgq
+; CHECK-NOT: cmp
+; CHECK-NOT: set
+; CHECK: {{jne|je}}
+ %pair = cmpxchg i64* %addr, i64 %desired, i64 %new seq_cst seq_cst
+ %success = extractvalue { i64, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ call void @foo()
+ ret void
+
+false:
+ call void @bar()
+ ret void
+}
+
+define i64 @cmpxchg_sext(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: cmpxchg_sext:
+; CHECK-DAG: cmpxchgl
+; CHECK-NOT: cmpl
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %mask = sext i1 %success to i64
+ ret i64 %mask
+}
+
+define i32 @cmpxchg_zext(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: cmpxchg_zext:
+; CHECK: cmpxchgl
+; CHECK-NOT: cmp
+; CHECK: sete [[BYTE:%[a-z0-9]+]]
+; CHECK: movzbl [[BYTE]], %eax
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %mask = zext i1 %success to i32
+ ret i32 %mask
+}
+
+
+define i32 @cmpxchg_use_eflags_and_val(i32* %addr, i32 %offset) {
+; CHECK-LABEL: cmpxchg_use_eflags_and_val:
+; CHECK: movl (%rdi), %e[[OLDVAL:[a-z0-9]+]]
+
+; CHECK: [[LOOPBB:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: leal (%r[[OLDVAL]],%rsi), [[NEW:%[a-z0-9]+]]
+; CHECK: cmpxchgl [[NEW]], (%rdi)
+; CHECK-NOT: cmpl
+; CHECK: jne [[LOOPBB]]
+
+ ; Result already in %eax
+; CHECK: retq
+entry:
+ %init = load atomic i32* %addr seq_cst, align 4
+ br label %loop
+
+loop:
+ %old = phi i32 [%init, %entry], [%oldval, %loop]
+ %new = add i32 %old, %offset
+ %pair = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst seq_cst
+ %oldval = extractvalue { i32, i1 } %pair, 0
+ %success = extractvalue { i32, i1 } %pair, 1
+ br i1 %success, label %done, label %loop
+
+done:
+ ret i32 %oldval
+}
+
+declare void @foo()
+declare void @bar()
diff --git a/test/CodeGen/X86/cmpxchg-i128-i1.ll b/test/CodeGen/X86/cmpxchg-i128-i1.ll
new file mode 100644
index 0000000..4dd3001
--- /dev/null
+++ b/test/CodeGen/X86/cmpxchg-i128-i1.ll
@@ -0,0 +1,83 @@
+; RUN: llc -mcpu=core-avx2 -mtriple=x86_64 -o - %s | FileCheck %s
+
+define i1 @try_cmpxchg(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: try_cmpxchg:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmp
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ ret i1 %success
+}
+
+define void @cmpxchg_flow(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_flow:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmp
+; CHECK-NOT: set
+; CHECK: {{jne|je}}
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ call void @foo()
+ ret void
+
+false:
+ call void @bar()
+ ret void
+}
+
+; Can't use the flags here because cmpxchg16b only sets ZF.
+define i1 @cmpxchg_arithcmp(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_arithcmp:
+; CHECK: cmpxchg16b
+; CHECK: cmpq
+; CHECK: retq
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %oldval = extractvalue { i128, i1 } %pair, 0
+ %success = icmp sge i128 %oldval, %desired
+ ret i1 %success
+}
+
+define i128 @cmpxchg_zext(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_zext:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmpq
+; CHECK: sete [[BYTE:%[a-z0-9]+]]
+; CHECK: movzbl [[BYTE]], %eax
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ %mask = zext i1 %success to i128
+ ret i128 %mask
+}
+
+
+define i128 @cmpxchg_use_eflags_and_val(i128* %addr, i128 %offset) {
+; CHECK-LABEL: cmpxchg_use_eflags_and_val:
+
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmpq
+; CHECK: jne
+entry:
+ %init = load atomic i128* %addr seq_cst, align 16
+ br label %loop
+
+loop:
+ %old = phi i128 [%init, %entry], [%oldval, %loop]
+ %new = add i128 %old, %offset
+
+ %pair = cmpxchg i128* %addr, i128 %old, i128 %new seq_cst seq_cst
+ %oldval = extractvalue { i128, i1 } %pair, 0
+ %success = extractvalue { i128, i1 } %pair, 1
+
+ br i1 %success, label %done, label %loop
+
+done:
+ ret i128 %old
+}
+
+declare void @foo()
+declare void @bar()
diff --git a/test/CodeGen/X86/coalescer-remat.ll b/test/CodeGen/X86/coalescer-remat.ll
index 468b70b..bb08a0e 100644
--- a/test/CodeGen/X86/coalescer-remat.ll
+++ b/test/CodeGen/X86/coalescer-remat.ll
@@ -5,7 +5,8 @@
define i32 @main() nounwind {
entry:
- %0 = cmpxchg i64* @val, i64 0, i64 1 monotonic monotonic
+ %t0 = cmpxchg i64* @val, i64 0, i64 1 monotonic monotonic
+ %0 = extractvalue { i64, i1 } %t0, 0
%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind
ret i32 0
}
diff --git a/test/CodeGen/X86/coff-comdat.ll b/test/CodeGen/X86/coff-comdat.ll
new file mode 100644
index 0000000..bf27b2f
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat.ll
@@ -0,0 +1,92 @@
+; RUN: llc -mtriple i386-pc-win32 < %s | FileCheck %s
+
+$f1 = comdat any
+@v1 = global i32 0, comdat $f1
+define void @f1() comdat $f1 {
+ ret void
+}
+
+$f2 = comdat exactmatch
+@v2 = global i32 0, comdat $f2
+define void @f2() comdat $f2 {
+ ret void
+}
+
+$f3 = comdat largest
+@v3 = global i32 0, comdat $f3
+define void @f3() comdat $f3 {
+ ret void
+}
+
+$f4 = comdat noduplicates
+@v4 = global i32 0, comdat $f4
+define void @f4() comdat $f4 {
+ ret void
+}
+
+$f5 = comdat samesize
+@v5 = global i32 0, comdat $f5
+define void @f5() comdat $f5 {
+ ret void
+}
+
+$f6 = comdat samesize
+@v6 = global i32 0, comdat $f6
+@f6 = global i32 0, comdat $f6
+
+$"\01@f7@0" = comdat any
+define x86_fastcallcc void @"\01@v7@0"() comdat $"\01@f7@0" {
+ ret void
+}
+define x86_fastcallcc void @"\01@f7@0"() comdat $"\01@f7@0" {
+ ret void
+}
+
+$f8 = comdat any
+define x86_fastcallcc void @v8() comdat $f8 {
+ ret void
+}
+define x86_fastcallcc void @f8() comdat $f8 {
+ ret void
+}
+
+$vftable = comdat largest
+
+@some_name = private unnamed_addr constant [2 x i8*] zeroinitializer, comdat $vftable
+@vftable = alias getelementptr([2 x i8*]* @some_name, i32 0, i32 1)
+
+; CHECK: .section .text,"xr",discard,_f1
+; CHECK: .globl _f1
+; CHECK: .section .text,"xr",same_contents,_f2
+; CHECK: .globl _f2
+; CHECK: .section .text,"xr",largest,_f3
+; CHECK: .globl _f3
+; CHECK: .section .text,"xr",one_only,_f4
+; CHECK: .globl _f4
+; CHECK: .section .text,"xr",same_size,_f5
+; CHECK: .globl _f5
+; CHECK: .section .text,"xr",associative,@f7@0
+; CHECK: .globl @v7@0
+; CHECK: .section .text,"xr",discard,@f7@0
+; CHECK: .globl @f7@0
+; CHECK: .section .text,"xr",associative,@f8@0
+; CHECK: .globl @v8@0
+; CHECK: .section .text,"xr",discard,@f8@0
+; CHECK: .globl @f8@0
+; CHECK: .section .bss,"bw",associative,_f1
+; CHECK: .globl _v1
+; CHECK: .section .bss,"bw",associative,_f2
+; CHECK: .globl _v2
+; CHECK: .section .bss,"bw",associative,_f3
+; CHECK: .globl _v3
+; CHECK: .section .bss,"bw",associative,_f4
+; CHECK: .globl _v4
+; CHECK: .section .bss,"bw",associative,_f5
+; CHECK: .globl _v5
+; CHECK: .section .bss,"bw",associative,_f6
+; CHECK: .globl _v6
+; CHECK: .section .bss,"bw",same_size,_f6
+; CHECK: .globl _f6
+; CHECK: .section .rdata,"rd",largest,_vftable
+; CHECK: .globl _vftable
+; CHECK: _vftable = L_some_name+4
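+; Summary of the selection kinds exercised above (for reference): any ->
+; discard, exactmatch -> same_contents, largest -> largest, noduplicates ->
+; one_only, samesize -> same_size; globals placed in a function's COMDAT get
+; the associative selection keyed on that function.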
diff --git a/test/CodeGen/X86/coff-comdat2.ll b/test/CodeGen/X86/coff-comdat2.ll
new file mode 100644
index 0000000..6744b5b
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat2.ll
@@ -0,0 +1,9 @@
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat largest
+@foo = global i32 0
+@bar = global i32 0, comdat $foo
+; CHECK: Associative COMDAT symbol 'foo' is not a key for it's COMDAT.
diff --git a/test/CodeGen/X86/coff-comdat3.ll b/test/CodeGen/X86/coff-comdat3.ll
new file mode 100644
index 0000000..76e464b
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat3.ll
@@ -0,0 +1,8 @@
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat largest
+@bar = global i32 0, comdat $foo
+; CHECK: Associative COMDAT symbol 'foo' does not exist.
diff --git a/test/CodeGen/X86/combine-64bit-vec-binop.ll b/test/CodeGen/X86/combine-64bit-vec-binop.ll
new file mode 100644
index 0000000..8440fda
--- /dev/null
+++ b/test/CodeGen/X86/combine-64bit-vec-binop.ll
@@ -0,0 +1,273 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -mtriple=x86_64-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK -check-prefix=SSE41
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+
+
+define double @test1_add(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %add = add <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test1_add
+; SSE41: paddd
+; AVX: vpaddd
+; CHECK-NEXT: ret
+
+
+define double @test2_add(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %add = add <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test2_add
+; SSE41: paddw
+; AVX: vpaddw
+; CHECK-NEXT: ret
+
+define double @test3_add(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %add = add <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test3_add
+; SSE41: paddb
+; AVX: vpaddb
+; CHECK-NEXT: ret
+
+
+define double @test1_sub(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %sub = sub <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test1_sub
+; SSE41: psubd
+; AVX: vpsubd
+; CHECK-NEXT: ret
+
+
+define double @test2_sub(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %sub = sub <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test2_sub
+; SSE41: psubw
+; AVX: vpsubw
+; CHECK-NEXT: ret
+
+
+define double @test3_sub(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %sub = sub <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test3_sub
+; SSE41: psubb
+; AVX: vpsubb
+; CHECK-NEXT: ret
+
+
+define double @test1_mul(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %mul = mul <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test1_mul
+; SSE41: pmulld
+; AVX: vpmulld
+; CHECK-NEXT: ret
+
+
+define double @test2_mul(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %mul = mul <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test2_mul
+; SSE41: pmullw
+; AVX: vpmullw
+; CHECK-NEXT: ret
+
+; There is no legal ISD::MUL with type MVT::v8i8.
+define double @test3_mul(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %mul = mul <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test3_mul
+; CHECK: pmullw
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
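+; Worked note (illustrative): v8i8 is widened for the multiply, the v8i16
+; pmullw produces the products, and pshufb picks the low byte of each lane
+; back out.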
+
+
+define double @test1_and(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %and = and <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test1_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test2_and(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %and = and <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test2_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test3_and(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %and = and <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test3_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test1_or(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %or = or <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test1_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test2_or(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %or = or <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test2_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test3_or(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %or = or <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test3_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test1_xor(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %xor = xor <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test1_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test2_xor(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %xor = xor <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test2_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test3_xor(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %xor = xor <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test3_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test_fadd(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %add = fadd <2 x float> %1, %2
+ %3 = bitcast <2 x float> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test_fadd
+; SSE41: addps
+; AVX: vaddps
+; CHECK-NEXT: ret
+
+define double @test_fsub(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %sub = fsub <2 x float> %1, %2
+ %3 = bitcast <2 x float> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test_fsub
+; SSE41: subps
+; AVX: vsubps
+; CHECK-NEXT: ret
+
+define double @test_fmul(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %mul = fmul <2 x float> %1, %2
+ %3 = bitcast <2 x float> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test_fmul
+; SSE41: mulps
+; AVX: vmulps
+; CHECK-NEXT: ret
+
diff --git a/test/CodeGen/X86/combine-or.ll b/test/CodeGen/X86/combine-or.ll
index c1ce533..ff807b9 100644
--- a/test/CodeGen/X86/combine-or.ll
+++ b/test/CodeGen/X86/combine-or.ll
@@ -25,7 +25,7 @@ define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: test2
; CHECK-NOT: xorps
-; CHECK: shufps
+; CHECK: movsd
; CHECK: ret
@@ -74,7 +74,7 @@ define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: test6
; CHECK-NOT: xorps
-; CHECK: shufps
+; CHECK: blendps $12
; CHECK-NEXT: ret
@@ -86,7 +86,7 @@ define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: test7
; CHECK-NOT: xorps
-; CHECK: shufps
+; CHECK: blendps $12
; CHECK-NEXT: ret
@@ -111,7 +111,7 @@ define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: test9
; CHECK-NOT: xorps
-; CHECK: shufps
+; CHECK: movsd
; CHECK: ret
diff --git a/test/CodeGen/X86/combine-vec-shuffle-2.ll b/test/CodeGen/X86/combine-vec-shuffle-2.ll
new file mode 100644
index 0000000..7ab7f80
--- /dev/null
+++ b/test/CodeGen/X86/combine-vec-shuffle-2.ll
@@ -0,0 +1,164 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Check that DAGCombiner correctly folds the following pairs of shuffles
+; using the following rules:
+; 1. shuffle(shuffle(x, y), undef) -> x
+; 2. shuffle(shuffle(x, y), undef) -> y
+; 3. shuffle(shuffle(x, y), undef) -> shuffle(x, undef)
+; 4. shuffle(shuffle(x, y), undef) -> shuffle(undef, y)
+;
+; Rules 3. and 4. are used only if the resulting shuffle mask is legal.
+
+define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test1
+; Mask: [3,0,0,1]
+; CHECK: pshufd $67
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test2
+; Mask: [2,0,0,3]
+; CHECK: pshufd $-62
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test3
+; Mask: [2,0,0,3]
+; CHECK: pshufd $-62
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 7, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test4
+; Mask: [0,0,0,1]
+; CHECK: pshufd $64
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 5, i32 5, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 4, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test5
+; Mask: [1,1]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test6(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test6
+; Mask: [2,0,0,0]
+; CHECK: pshufd $2
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test7(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test7
+; Mask: [0,2,0,2]
+; CHECK: pshufd $-120
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test8
+; Mask: [1,0,3,0]
+; CHECK: pshufd $49
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test9(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 3, i32 2, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test9
+; Mask: [1,3,0,2]
+; CHECK: pshufd $-115
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test10(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test10
+; Mask: [1,0,1,0]
+; CHECK: pshufd $17
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test11(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 2, i32 5, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test11
+; Mask: [1,0,2,1]
+; CHECK: pshufd $97
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test12(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 0, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 4, i32 0, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test12
+; Mask: [0,0,0,0]
+; CHECK: pshufd $0
+; CHECK-NEXT: ret
+
+
+; The following pair of shuffles is folded into vector %A.
+define <4 x i32> @test13(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 4, i32 2, i32 6>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 0, i32 2, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: pshufd
+; CHECK: ret
+
+
+; The following pair of shuffles is folded into vector %B.
+define <4 x i32> @test14(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 4, i32 1, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: pshufd
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/computeKnownBits_urem.ll b/test/CodeGen/X86/computeKnownBits_urem.ll
new file mode 100644
index 0000000..9902e6f
--- /dev/null
+++ b/test/CodeGen/X86/computeKnownBits_urem.ll
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
+define i32 @main() #0 {
+entry:
+ %a = alloca i32, align 4
+ store i32 1, i32* %a, align 4
+ %0 = load i32* %a, align 4
+ %or = or i32 1, %0
+ %and = and i32 1, %or
+ %rem = urem i32 %and, 1
+ %add = add i32 %rem, 1
+ ret i32 %add
+}
+; CHECK: $1, %eax
+; CHECK-NEXT: retq
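+; Worked reasoning (illustrative): or-ing with 1 sets bit 0 and and-ing with
+; 1 keeps only bit 0, so %and is known to be exactly 1; 1 urem 1 = 0 and
+; 0 + 1 = 1, which folds the whole function to the "$1, %eax" move.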
diff --git a/test/CodeGen/X86/cvt16.ll b/test/CodeGen/X86/cvt16.ll
new file mode 100644
index 0000000..951b5c3
--- /dev/null
+++ b/test/CodeGen/X86/cvt16.ll
@@ -0,0 +1,64 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=-f16c | FileCheck %s -check-prefix=CHECK -check-prefix=LIBCALL
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+f16c | FileCheck %s -check-prefix=CHECK -check-prefix=F16C
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -soft-float=1 -mattr=-f16c | FileCheck %s -check-prefix=CHECK -check-prefix=SOFTFLOAT
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -soft-float=1 -mattr=+f16c | FileCheck %s -check-prefix=CHECK -check-prefix=SOFTFLOAT
+
+; This is a test for float to half float conversions on x86-64.
+;
+; If the -soft-float flag is set, or if there is no F16C support, then:
+; 1) half float to float conversions are
+;    translated into calls to __gnu_h2f_ieee, defined
+;    by the compiler runtime library;
+; 2) float to half float conversions are translated into calls
+;    to __gnu_f2h_ieee, which is expected to be defined by the
+;    compiler runtime library.
+;
+; Otherwise (we have F16C support):
+; 1) half float to float conversions are translated using
+;    vcvtph2ps instructions;
+; 2) float to half float conversions are translated using
+;    vcvtps2ph instructions.
+
+
+define void @test1(float %src, i16* %dest) {
+ %1 = tail call i16 @llvm.convert.to.fp16(float %src)
+ store i16 %1, i16* %dest, align 2
+ ret void
+}
+; CHECK-LABEL: test1
+; LIBCALL: callq __gnu_f2h_ieee
+; SOFTFLOAT: callq __gnu_f2h_ieee
+; F16C: vcvtps2ph
+; CHECK: ret
+
+
+define float @test2(i16* nocapture %src) {
+ %1 = load i16* %src, align 2
+ %2 = tail call float @llvm.convert.from.fp16(i16 %1)
+ ret float %2
+}
+; CHECK-LABEL: test2:
+; LIBCALL: jmp __gnu_h2f_ieee
+; SOFTFLOAT: callq __gnu_h2f_ieee
+; F16C: vcvtph2ps
+; F16C: ret
+
+
+define float @test3(float %src) nounwind uwtable readnone {
+ %1 = tail call i16 @llvm.convert.to.fp16(float %src)
+ %2 = tail call float @llvm.convert.from.fp16(i16 %1)
+ ret float %2
+}
+
+; CHECK-LABEL: test3:
+; LIBCALL: callq __gnu_f2h_ieee
+; LIBCALL: jmp __gnu_h2f_ieee
+; SOFTFLOAT: callq __gnu_f2h_ieee
+; SOFTFLOAT: callq __gnu_h2f_ieee
+; F16C: vcvtps2ph
+; F16C-NEXT: vcvtph2ps
+; F16C: ret
+
+declare float @llvm.convert.from.fp16(i16) nounwind readnone
+declare i16 @llvm.convert.to.fp16(float) nounwind readnone
+
diff --git a/test/CodeGen/X86/dagcombine-and-setcc.ll b/test/CodeGen/X86/dagcombine-and-setcc.ll
new file mode 100644
index 0000000..e7336a9
--- /dev/null
+++ b/test/CodeGen/X86/dagcombine-and-setcc.ll
@@ -0,0 +1,47 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+; Function Attrs: nounwind
+declare i32 @printf(i8* nocapture readonly, ...)
+
+; On X86 1 is true and 0 is false, so we can't perform the combine:
+; (and (setgt X, true), (setgt Y, true)) -> (setgt (or X, Y), true)
+; This combine only works if the true value is -1.
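+; With true = -1, setgt X, -1 is just a sign-bit test and the sign bit of
+; X|Y is the OR of the individual sign bits, so the fold is sound. With
+; true = 1 it is not: e.g. X = 2, Y = 0 gives (2>1)&(0>1) = false, but
+; (2|0)>1 = true.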
+
+
+;CHECK: cmpl
+;CHECK: setg
+;CHECK: cmpl
+;CHECK: setg
+;CHECK: andb
+
+@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+; Function Attrs: optsize ssp uwtable
+define i32 @foo(i32 %a, i32 %b, i32 * %c) {
+if.else429:
+ %cmp.i1144 = icmp eq i32* %c, null
+ %cmp430 = icmp slt i32 %a, 2
+ %cmp432 = icmp slt i32 %b, 2
+ %or.cond710 = or i1 %cmp430, %cmp432
+ %or.cond710.not = xor i1 %or.cond710, true
+ %brmerge1448 = or i1 %cmp.i1144, %or.cond710.not
+ br i1 %brmerge1448, label %ret1, label %ret2
+
+ret1:
+ ret i32 0
+
+ret2:
+ ret i32 1
+}
+
+define i32 @main(i32 %argc, i8** nocapture readnone %argv) {
+ %res = alloca i32, align 4
+ %t = call i32 @foo(i32 1, i32 2, i32* %res) #3
+ %v = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %t)
+ ret i32 0
+}
+
+
+
diff --git a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
index 23f8335..4912213 100644
--- a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
+++ b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
@@ -52,58 +52,153 @@ define void @_Z3barii(i32 %param1, i32 %param2) #0 {
entry:
%var1 = alloca %struct.AAA3, align 1
%var2 = alloca %struct.AAA3, align 1
- %tobool = icmp eq i32 %param2, 0
- br i1 %tobool, label %if.end, label %if.then
+ tail call void @llvm.dbg.value(metadata !{i32 %param1}, i64 0, metadata !30), !dbg !47
+ tail call void @llvm.dbg.value(metadata !{i32 %param2}, i64 0, metadata !31), !dbg !47
+ tail call void @llvm.dbg.value(metadata !48, i64 0, metadata !32), !dbg !49
+ %tobool = icmp eq i32 %param2, 0, !dbg !50
+ br i1 %tobool, label %if.end, label %if.then, !dbg !50
if.then: ; preds = %entry
- %call = call i8* @_Z5i2stri(i32 %param2)
- br label %if.end
+ %call = tail call i8* @_Z5i2stri(i32 %param2), !dbg !52
+ tail call void @llvm.dbg.value(metadata !{i8* %call}, i64 0, metadata !32), !dbg !49
+ br label %if.end, !dbg !54
if.end: ; preds = %entry, %if.then
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !60)
- call void @llvm.dbg.value(metadata !62, i64 0, metadata !63)
- %arraydecay.i = getelementptr inbounds %struct.AAA3* %var1, i64 0, i32 0, i64 0
- call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
- call void @llvm.dbg.declare(metadata !{%struct.AAA3* %var2}, metadata !38)
- %arraydecay.i5 = getelementptr inbounds %struct.AAA3* %var2, i64 0, i32 0, i64 0
- call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
- %tobool1 = icmp eq i32 %param1, 0
- br i1 %tobool1, label %if.else, label %if.then2
+ tail call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !33), !dbg !55
+ tail call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !56), !dbg !57
+ tail call void @llvm.dbg.value(metadata !58, i64 0, metadata !59), !dbg !60
+ %arraydecay.i = getelementptr inbounds %struct.AAA3* %var1, i64 0, i32 0, i64 0, !dbg !61
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !61
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !34), !dbg !63
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !64), !dbg !65
+ call void @llvm.dbg.value(metadata !58, i64 0, metadata !66), !dbg !67
+ %arraydecay.i5 = getelementptr inbounds %struct.AAA3* %var2, i64 0, i32 0, i64 0, !dbg !68
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !68
+ %tobool1 = icmp eq i32 %param1, 0, !dbg !69
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !34), !dbg !63
+ br i1 %tobool1, label %if.else, label %if.then2, !dbg !69
if.then2: ; preds = %if.end
- call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0))
- br label %if.end3
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !71), !dbg !73
+ call void @llvm.dbg.value(metadata !74, i64 0, metadata !75), !dbg !76
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0)), !dbg !76
+ br label %if.end3, !dbg !72
if.else: ; preds = %if.end
- call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0))
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !77), !dbg !79
+ call void @llvm.dbg.value(metadata !80, i64 0, metadata !81), !dbg !82
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0)), !dbg !82
br label %if.end3
if.end3: ; preds = %if.else, %if.then2
- call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
- ret void
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !33), !dbg !55
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !83), !dbg !85
+ call void @llvm.dbg.value(metadata !58, i64 0, metadata !86), !dbg !87
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !87
+ ret void, !dbg !88
}
-; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata) #1
-
-declare i8* @_Z5i2stri(i32) #2
+declare i8* @_Z5i2stri(i32) #1
-declare void @_Z3fooPcjPKc(i8*, i32, i8*) #2
+declare void @_Z3fooPcjPKc(i8*, i32, i8*) #1
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.value(metadata, i64, metadata) #1
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
-attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-!llvm.module.flags = !{!48, !49}
-!llvm.ident = !{!50}
-
-!38 = metadata !{i32 786688, null, metadata !"var2", null, i32 20, null, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [var2] [line 20]
-!48 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
-!50 = metadata !{metadata !"clang version 3.5 (202418)"}
-!60 = metadata !{i32 786689, null, metadata !"this", null, i32 16777216, null, i32 1088, null} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!62 = metadata !{i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)}
-!63 = metadata !{i32 786689, null, metadata !"value", null, i32 33554439, null, i32 0, null} ; [ DW_TAG_arg_variable ] [value] [line 7]
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!44, !45}
+!llvm.ident = !{!46}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !3, metadata !23, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"dbg-changes-codegen-branch-folding.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"AAA3", i32 4, i64 32, i64 8, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS4AAA3"} ; [ DW_TAG_structure_type ] [AAA3] [line 4, size 32, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !6, metadata !11, metadata !17, metadata !18}
+!6 = metadata !{i32 786445, metadata !1, metadata !"_ZTS4AAA3", metadata !"text", i32 8, i64 32, i64 8, i64 0, i32 0, metadata !7} ; [ DW_TAG_member ] [text] [line 8, size 32, align 8, offset 0] [from ]
+!7 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 32, i64 8, i32 0, i32 0, metadata !8, metadata !9, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 32, align 8, offset 0] [from char]
+!8 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786465, i64 0, i64 4} ; [ DW_TAG_subrange_type ] [0, 3]
+!11 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"AAA3", metadata !"AAA3", metadata !"", i32 5, metadata !12, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 5} ; [ DW_TAG_subprogram ] [line 5] [AAA3]
+!12 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !13, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!13 = metadata !{null, metadata !14, metadata !15}
+!14 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS4AAA3"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS4AAA3]
+!15 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!16 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !8} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from char]
+!17 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"operator=", metadata !"operator=", metadata !"_ZN4AAA3aSEPKc", i32 6, metadata !12, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 6} ; [ DW_TAG_subprogram ] [line 6] [operator=]
+!18 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"operator const char *", metadata !"operator const char *", metadata !"_ZNK4AAA3cvPKcEv", i32 7, metadata !19, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 7} ; [ DW_TAG_subprogram ] [line 7] [operator const char *]
+!19 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !20, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!20 = metadata !{metadata !15, metadata !21}
+!21 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !22} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from ]
+!22 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !"_ZTS4AAA3"} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from _ZTS4AAA3]
+!23 = metadata !{metadata !24, metadata !35, metadata !40}
+!24 = metadata !{i32 786478, metadata !1, metadata !25, metadata !"bar", metadata !"bar", metadata !"_Z3barii", i32 11, metadata !26, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32, i32)* @_Z3barii, null, null, metadata !29, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [bar]
+!25 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!26 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !27, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!27 = metadata !{null, metadata !28, metadata !28}
+!28 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!29 = metadata !{metadata !30, metadata !31, metadata !32, metadata !33, metadata !34}
+!30 = metadata !{i32 786689, metadata !24, metadata !"param1", metadata !25, i32 16777227, metadata !28, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [param1] [line 11]
+!31 = metadata !{i32 786689, metadata !24, metadata !"param2", metadata !25, i32 33554443, metadata !28, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [param2] [line 11]
+!32 = metadata !{i32 786688, metadata !24, metadata !"temp", metadata !25, i32 12, metadata !15, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [temp] [line 12]
+!33 = metadata !{i32 786688, metadata !24, metadata !"var1", metadata !25, i32 17, metadata !"_ZTS4AAA3", i32 0, i32 0} ; [ DW_TAG_auto_variable ] [var1] [line 17]
+!34 = metadata !{i32 786688, metadata !24, metadata !"var2", metadata !25, i32 18, metadata !"_ZTS4AAA3", i32 0, i32 0} ; [ DW_TAG_auto_variable ] [var2] [line 18]
+!35 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"operator=", metadata !"operator=", metadata !"_ZN4AAA3aSEPKc", i32 6, metadata !12, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, metadata !17, metadata !36, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [operator=]
+!36 = metadata !{metadata !37, metadata !39}
+!37 = metadata !{i32 786689, metadata !35, metadata !"this", null, i32 16777216, metadata !38, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!38 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS4AAA3"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS4AAA3]
+!39 = metadata !{i32 786689, metadata !35, metadata !"value", metadata !25, i32 33554438, metadata !15, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!40 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"AAA3", metadata !"AAA3", metadata !"_ZN4AAA3C2EPKc", i32 5, metadata !12, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, metadata !11, metadata !41, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [AAA3]
+!41 = metadata !{metadata !42, metadata !43}
+!42 = metadata !{i32 786689, metadata !40, metadata !"this", null, i32 16777216, metadata !38, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!43 = metadata !{i32 786689, metadata !40, metadata !"value", metadata !25, i32 33554437, metadata !15, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!44 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!45 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!46 = metadata !{metadata !"clang version 3.5.0 "}
+!47 = metadata !{i32 11, i32 0, metadata !24, null}
+!48 = metadata !{i8* null}
+!49 = metadata !{i32 12, i32 0, metadata !24, null}
+!50 = metadata !{i32 14, i32 0, metadata !51, null}
+!51 = metadata !{i32 786443, metadata !1, metadata !24, i32 14, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!52 = metadata !{i32 15, i32 0, metadata !53, null}
+!53 = metadata !{i32 786443, metadata !1, metadata !51, i32 14, i32 0, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!54 = metadata !{i32 16, i32 0, metadata !53, null}
+!55 = metadata !{i32 17, i32 0, metadata !24, null}
+!56 = metadata !{i32 786689, metadata !40, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !55} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!57 = metadata !{i32 0, i32 0, metadata !40, metadata !55}
+!58 = metadata !{i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)}
+!59 = metadata !{i32 786689, metadata !40, metadata !"value", metadata !25, i32 33554437, metadata !15, i32 0, metadata !55} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!60 = metadata !{i32 5, i32 0, metadata !40, metadata !55}
+!61 = metadata !{i32 5, i32 0, metadata !62, metadata !55}
+!62 = metadata !{i32 786443, metadata !1, metadata !40, i32 5, i32 0, i32 0, i32 3} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!63 = metadata !{i32 18, i32 0, metadata !24, null}
+!64 = metadata !{i32 786689, metadata !40, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !63} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!65 = metadata !{i32 0, i32 0, metadata !40, metadata !63}
+!66 = metadata !{i32 786689, metadata !40, metadata !"value", metadata !25, i32 33554437, metadata !15, i32 0, metadata !63} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!67 = metadata !{i32 5, i32 0, metadata !40, metadata !63}
+!68 = metadata !{i32 5, i32 0, metadata !62, metadata !63}
+!69 = metadata !{i32 20, i32 0, metadata !70, null}
+!70 = metadata !{i32 786443, metadata !1, metadata !24, i32 20, i32 0, i32 0, i32 2} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!71 = metadata !{i32 786689, metadata !35, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !72} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!72 = metadata !{i32 21, i32 0, metadata !70, null}
+!73 = metadata !{i32 0, i32 0, metadata !35, metadata !72}
+!74 = metadata !{i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0)}
+!75 = metadata !{i32 786689, metadata !35, metadata !"value", metadata !25, i32 33554438, metadata !15, i32 0, metadata !72} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!76 = metadata !{i32 6, i32 0, metadata !35, metadata !72}
+!77 = metadata !{i32 786689, metadata !35, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !78} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!78 = metadata !{i32 23, i32 0, metadata !70, null}
+!79 = metadata !{i32 0, i32 0, metadata !35, metadata !78}
+!80 = metadata !{i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0)}
+!81 = metadata !{i32 786689, metadata !35, metadata !"value", metadata !25, i32 33554438, metadata !15, i32 0, metadata !78} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!82 = metadata !{i32 6, i32 0, metadata !35, metadata !78}
+!83 = metadata !{i32 786689, metadata !35, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !84} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!84 = metadata !{i32 24, i32 0, metadata !24, null}
+!85 = metadata !{i32 0, i32 0, metadata !35, metadata !84}
+!86 = metadata !{i32 786689, metadata !35, metadata !"value", metadata !25, i32 33554438, metadata !15, i32 0, metadata !84} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!87 = metadata !{i32 6, i32 0, metadata !35, metadata !84}
+!88 = metadata !{i32 25, i32 0, metadata !24, null}
diff --git a/test/CodeGen/X86/dllexport-x86_64.ll b/test/CodeGen/X86/dllexport-x86_64.ll
index f4dec4f..0d5afa1 100644
--- a/test/CodeGen/X86/dllexport-x86_64.ll
+++ b/test/CodeGen/X86/dllexport-x86_64.ll
@@ -73,7 +73,7 @@ define weak_odr dllexport void @weak1() {
@weak_alias = dllexport alias weak_odr void()* @f1
@blob = global [6 x i8] c"\B8*\00\00\00\C3", section ".text", align 16
-@blob_alias = dllexport alias i32 (), [6 x i8]* @blob
+@blob_alias = dllexport alias bitcast ([6 x i8]* @blob to i32 ()*)
; CHECK: .section .drectve
; WIN32: " /EXPORT:Var1,DATA"
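The one-line change above tracks an IR syntax update: an alias no longer carries a separate aliasee type, so pointing a function-typed alias at data of a different type is now spelled as an explicit bitcast constant expression. A minimal sketch of the new form, mirroring the test with hypothetical names:

@code_bytes = global [6 x i8] c"\B8*\00\00\00\C3", section ".text", align 16
; The aliasee must be a constant expression of the alias's own type.
@as_func = alias bitcast ([6 x i8]* @code_bytes to i32 ()*)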
diff --git a/test/CodeGen/X86/elf-comdat.ll b/test/CodeGen/X86/elf-comdat.ll
new file mode 100644
index 0000000..c7e6df7
--- /dev/null
+++ b/test/CodeGen/X86/elf-comdat.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
+
+$f = comdat any
+@v = global i32 0, comdat $f
+define void @f() comdat $f {
+ ret void
+}
+; CHECK: .section .text.f,"axG",@progbits,f,comdat
+; CHECK: .globl f
+; CHECK: .section .bss.v,"aGw",@nobits,f,comdat
+; CHECK: .globl v
diff --git a/test/CodeGen/X86/elf-comdat2.ll b/test/CodeGen/X86/elf-comdat2.ll
new file mode 100644
index 0000000..209da39
--- /dev/null
+++ b/test/CodeGen/X86/elf-comdat2.ll
@@ -0,0 +1,12 @@
+; RUN: llc -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
+
+$foo = comdat any
+@bar = global i32 42, comdat $foo
+@foo = global i32 42
+
+; CHECK: .type bar,@object
+; CHECK-NEXT: .section .data.bar,"aGw",@progbits,foo,comdat
+; CHECK-NEXT: .globl bar
+; CHECK: .type foo,@object
+; CHECK-NEXT: .data
+; CHECK-NEXT: .globl foo
diff --git a/test/CodeGen/X86/fast-isel-args-fail2.ll b/test/CodeGen/X86/fast-isel-args-fail2.ll
new file mode 100644
index 0000000..08de472
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-args-fail2.ll
@@ -0,0 +1,10 @@
+; RUN: not --crash llc < %s -fast-isel -fast-isel-abort-args -mtriple=x86_64-apple-darwin10
+; REQUIRES: asserts
+
+%struct.s0 = type { x86_fp80, x86_fp80 }
+
+; FastISel cannot handle this case yet. Make sure that we abort.
+define i8* @args_fail(%struct.s0* byval nocapture readonly align 16 %y) {
+ %1 = bitcast %struct.s0* %y to i8*
+ ret i8* %1
+}
diff --git a/test/CodeGen/X86/fast-isel-args.ll b/test/CodeGen/X86/fast-isel-args.ll
index 0f36265..8c86a9c 100644
--- a/test/CodeGen/X86/fast-isel-args.ll
+++ b/test/CodeGen/X86/fast-isel-args.ll
@@ -23,3 +23,27 @@ entry:
%add2 = add nsw i64 %add, %conv1
ret i64 %add2
}
+
+define float @t4(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h) {
+entry:
+ %add1 = fadd float %a, %b
+ %add2 = fadd float %c, %d
+ %add3 = fadd float %e, %f
+ %add4 = fadd float %g, %h
+ %add5 = fadd float %add1, %add2
+ %add6 = fadd float %add3, %add4
+ %add7 = fadd float %add5, %add6
+ ret float %add7
+}
+
+define double @t5(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h) {
+entry:
+ %add1 = fadd double %a, %b
+ %add2 = fadd double %c, %d
+ %add3 = fadd double %e, %f
+ %add4 = fadd double %g, %h
+ %add5 = fadd double %add1, %add2
+ %add6 = fadd double %add3, %add4
+ %add7 = fadd double %add5, %add6
+ ret double %add7
+}
diff --git a/test/CodeGen/X86/fast-isel-branch_weights.ll b/test/CodeGen/X86/fast-isel-branch_weights.ll
new file mode 100644
index 0000000..bc41395
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-branch_weights.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test that the basic blocks are reordered according to their branch weights.
+define i64 @branch_weights_test(i64 %a, i64 %b) {
+; CHECK-LABEL: branch_weights_test
+; CHECK-LABEL: success
+; CHECK-LABEL: fail
+ %1 = icmp ult i64 %a, %b
+ br i1 %1, label %fail, label %success, !prof !0
+
+fail:
+ ret i64 -1
+
+success:
+ ret i64 0
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
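The !prof operand on the br is what drives the reordering checked above: with weights 0 and 2147483647 the %success edge is essentially certain, so block placement is expected to put %success on the fall-through path ahead of %fail. A minimal sketch of attaching branch weights in the same old-style metadata syntax (function name and weight values hypothetical):

define i32 @likely_sketch(i32 %x) {
  %cmp = icmp eq i32 %x, 0
  ; First weight (1) is for the true edge %cold, second (1000) for %hot,
  ; so layout should favor falling through to %hot.
  br i1 %cmp, label %cold, label %hot, !prof !1
cold:
  ret i32 -1
hot:
  ret i32 0
}
!1 = metadata !{metadata !"branch_weights", i32 1, i32 1000}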
diff --git a/test/CodeGen/X86/fast-isel-cmp-branch2.ll b/test/CodeGen/X86/fast-isel-cmp-branch2.ll
new file mode 100644
index 0000000..7e45c49
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp-branch2.ll
@@ -0,0 +1,294 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+define i32 @fcmp_oeq(float %x, float %y) {
+; CHECK-LABEL: fcmp_oeq
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_1}}
+; CHECK-NEXT: jnp {{LBB.+_2}}
+ %1 = fcmp oeq float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt(float %x, float %y) {
+; CHECK-LABEL: fcmp_ogt
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp ogt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge(float %x, float %y) {
+; CHECK-LABEL: fcmp_oge
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp oge float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt(float %x, float %y) {
+; CHECK-LABEL: fcmp_olt
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp olt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole(float %x, float %y) {
+; CHECK-LABEL: fcmp_ole
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp ole float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one(float %x, float %y) {
+; CHECK-LABEL: fcmp_one
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = fcmp one float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord(float %x, float %y) {
+; CHECK-LABEL: fcmp_ord
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno(float %x, float %y) {
+; CHECK-LABEL: fcmp_uno
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq(float %x, float %y) {
+; CHECK-LABEL: fcmp_ueq
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_2}}
+ %1 = fcmp ueq float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt(float %x, float %y) {
+; CHECK-LABEL: fcmp_ugt
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ugt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge(float %x, float %y) {
+; CHECK-LABEL: fcmp_uge
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp uge float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult(float %x, float %y) {
+; CHECK-LABEL: fcmp_ult
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ult float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule(float %x, float %y) {
+; CHECK-LABEL: fcmp_ule
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp ule float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une(float %x, float %y) {
+; CHECK-LABEL: fcmp_une
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_2}}
+; CHECK-NEXT: jp {{LBB.+_2}}
+; CHECK-NEXT: jmp {{LBB.+_1}}
+ %1 = fcmp une float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_eq
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jne {{LBB.+_1}}
+ %1 = icmp eq i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ne
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = icmp ne i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ugt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ugt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = icmp ugt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_uge(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_uge
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = icmp uge i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ult(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ult
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = icmp ult i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ule(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ule
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = icmp ule i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sgt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sgt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jle {{LBB.+_1}}
+ %1 = icmp sgt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sge(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sge
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jl {{LBB.+_1}}
+ %1 = icmp sge i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_slt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jge {{LBB.+_1}}
+ %1 = icmp slt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sle(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sle
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jg {{LBB.+_1}}
+ %1 = icmp sle i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
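The patterns above encode the x86 flag model for ucomiss: an unordered operand (NaN) raises PF, and the instruction only produces "above"-style flags, so oeq needs both a ZF test and a PF test (the jne/jnp pair), olt and ole commute the operands so the carry flag can be reused, and une is the dual that needs three jumps. One way to see why oeq cannot be a single flag test is to decompose it in IR; a small sketch under that reading (function name hypothetical):

define i1 @oeq_decomposed_sketch(float %x, float %y) {
; fcmp oeq is the conjunction of "ordered" (PF=0 after ucomiss) and
; "equal or unordered" (ZF=1), so two distinct flags must be consulted.
  %ord = fcmp ord float %x, %y
  %eq  = fcmp ueq float %x, %y
  %oeq = and i1 %ord, %eq
  ret i1 %oeq
}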
diff --git a/test/CodeGen/X86/fast-isel-cmp-branch3.ll b/test/CodeGen/X86/fast-isel-cmp-branch3.ll
new file mode 100644
index 0000000..a3f6851
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp-branch3.ll
@@ -0,0 +1,470 @@
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+define i32 @fcmp_oeq1(float %x) {
+; CHECK-LABEL: fcmp_oeq1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp oeq float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oeq2(float %x) {
+; CHECK-LABEL: fcmp_oeq2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_1}}
+; CHECK-NEXT: jnp {{LBB.+_2}}
+ %1 = fcmp oeq float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt1(float %x) {
+; CHECK-LABEL: fcmp_ogt1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp ogt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt2(float %x) {
+; CHECK-LABEL: fcmp_ogt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp ogt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge1(float %x) {
+; CHECK-LABEL: fcmp_oge1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp oge float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge2(float %x) {
+; CHECK-LABEL: fcmp_oge2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp oge float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt1(float %x) {
+; CHECK-LABEL: fcmp_olt1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp olt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt2(float %x) {
+; CHECK-LABEL: fcmp_olt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp olt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole1(float %x) {
+; CHECK-LABEL: fcmp_ole1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ole float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole2(float %x) {
+; CHECK-LABEL: fcmp_ole2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp ole float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one1(float %x) {
+; CHECK-LABEL: fcmp_one1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp one float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one2(float %x) {
+; CHECK-LABEL: fcmp_one2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = fcmp one float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord1(float %x) {
+; CHECK-LABEL: fcmp_ord1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord2(float %x) {
+; CHECK-LABEL: fcmp_ord2
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno1(float %x) {
+; CHECK-LABEL: fcmp_uno1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno2(float %x) {
+; CHECK-LABEL: fcmp_uno2
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq1(float %x) {
+; CHECK-LABEL: fcmp_ueq1
+; CHECK-NOT: ucomiss
+ %1 = fcmp ueq float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq2(float %x) {
+; CHECK-LABEL: fcmp_ueq2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_2}}
+ %1 = fcmp ueq float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt1(float %x) {
+; CHECK-LABEL: fcmp_ugt1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp ugt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt2(float %x) {
+; CHECK-LABEL: fcmp_ugt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ugt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge1(float %x) {
+; CHECK-LABEL: fcmp_uge1
+; CHECK-NOT: ucomiss
+ %1 = fcmp uge float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge2(float %x) {
+; CHECK-LABEL: fcmp_uge2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp uge float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult1(float %x) {
+; CHECK-LABEL: fcmp_ult1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp ult float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult2(float %x) {
+; CHECK-LABEL: fcmp_ult2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ult float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule1(float %x) {
+; CHECK-LABEL: fcmp_ule1
+; CHECK-NOT: ucomiss
+ %1 = fcmp ule float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule2(float %x) {
+; CHECK-LABEL: fcmp_ule2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp ule float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une1(float %x) {
+; CHECK-LABEL: fcmp_une1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp une float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une2(float %x) {
+; CHECK-LABEL: fcmp_une2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_2}}
+; CHECK-NEXT: jp {{LBB.+_2}}
+; CHECK-NEXT: jmp {{LBB.+_1}}
+ %1 = fcmp une float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_eq(i32 %x) {
+; CHECK-LABEL: icmp_eq
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp eq i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ne(i32 %x) {
+; CHECK-LABEL: icmp_ne
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ne i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ugt(i32 %x) {
+; CHECK-LABEL: icmp_ugt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ugt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_uge(i32 %x) {
+; CHECK-LABEL: icmp_uge
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp uge i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ult(i32 %x) {
+; CHECK-LABEL: icmp_ult
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ult i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ule(i32 %x) {
+; CHECK-LABEL: icmp_ule
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp ule i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sgt(i32 %x) {
+; CHECK-LABEL: icmp_sgt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp sgt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sge(i32 %x) {
+; CHECK-LABEL: icmp_sge
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp sge i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_slt(i32 %x) {
+; CHECK-LABEL: icmp_slt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp slt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sle(i32 %x) {
+; CHECK-LABEL: icmp_sle
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp sle i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
diff --git a/test/CodeGen/X86/fast-isel-cmp.ll b/test/CodeGen/X86/fast-isel-cmp.ll
new file mode 100644
index 0000000..1b72cfc
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp.ll
@@ -0,0 +1,689 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=SDAG
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=FAST
+
+define zeroext i1 @fcmp_oeq(float %x, float %y) {
+; SDAG-LABEL: fcmp_oeq
+; SDAG: cmpeqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_oeq
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: setnp %cl
+; FAST-NEXT: andb %al, %cl
+ %1 = fcmp oeq float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt(float %x, float %y) {
+; SDAG-LABEL: fcmp_ogt
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: seta %al
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: seta %al
+ %1 = fcmp ogt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge(float %x, float %y) {
+; SDAG-LABEL: fcmp_oge
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_oge
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setae %al
+ %1 = fcmp oge float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt(float %x, float %y) {
+; SDAG-LABEL: fcmp_olt
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_olt
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: seta %al
+ %1 = fcmp olt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole(float %x, float %y) {
+; SDAG-LABEL: fcmp_ole
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_ole
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setae %al
+ %1 = fcmp ole float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one(float %x, float %y) {
+; SDAG-LABEL: fcmp_one
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setne %al
+; FAST-LABEL: fcmp_one
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+ %1 = fcmp one float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord(float %x, float %y) {
+; SDAG-LABEL: fcmp_ord
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno(float %x, float %y) {
+; SDAG-LABEL: fcmp_uno
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq(float %x, float %y) {
+; SDAG-LABEL: fcmp_ueq
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: sete %al
+; FAST-LABEL: fcmp_ueq
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+ %1 = fcmp ueq float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt(float %x, float %y) {
+; SDAG-LABEL: fcmp_ugt
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ugt
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setb %al
+ %1 = fcmp ugt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge(float %x, float %y) {
+; SDAG-LABEL: fcmp_uge
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_uge
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setbe %al
+ %1 = fcmp uge float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult(float %x, float %y) {
+; SDAG-LABEL: fcmp_ult
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ult
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setb %al
+ %1 = fcmp ult float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule(float %x, float %y) {
+; SDAG-LABEL: fcmp_ule
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_ule
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setbe %al
+ %1 = fcmp ule float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une(float %x, float %y) {
+; SDAG-LABEL: fcmp_une
+; SDAG: cmpneqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_une
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: setp %cl
+; FAST-NEXT: orb %al, %cl
+ %1 = fcmp une float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_eq(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_eq
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: sete %al
+; FAST-LABEL: icmp_eq
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: sete %al
+ %1 = icmp eq i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ne(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ne
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setne %al
+; FAST-LABEL: icmp_ne
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setne %al
+ %1 = icmp ne i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ugt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ugt
+; SDAG: cmpl %edi, %esi
+; SDAG-NEXT: setb %al
+; FAST-LABEL: icmp_ugt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: seta %al
+ %1 = icmp ugt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_uge(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_uge
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setae %al
+; FAST-LABEL: icmp_uge
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setae %al
+ %1 = icmp uge i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ult(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ult
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setb %al
+; FAST-LABEL: icmp_ult
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setb %al
+ %1 = icmp ult i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ule(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ule
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: icmp_ule
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setbe %al
+ %1 = icmp ule i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sgt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sgt
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setg %al
+; FAST-LABEL: icmp_sgt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setg %al
+ %1 = icmp sgt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sge(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sge
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setge %al
+; FAST-LABEL: icmp_sge
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setge %al
+ %1 = icmp sge i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_slt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_slt
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setl %al
+; FAST-LABEL: icmp_slt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setl %al
+ %1 = icmp slt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sle(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sle
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setle %al
+; FAST-LABEL: icmp_sle
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setle %al
+ %1 = icmp sle i32 %x, %y
+ ret i1 %1
+}
+
+; Test cmp folding and condition-code optimization for identical operands
+; and constant-zero operands.
+define zeroext i1 @fcmp_oeq2(float %x) {
+; SDAG-LABEL: fcmp_oeq2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_oeq2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp oeq float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oeq3(float %x) {
+; SDAG-LABEL: fcmp_oeq3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: cmpeqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_oeq3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: setnp %cl
+; FAST-NEXT: andb %al, %cl
+ %1 = fcmp oeq float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt2(float %x) {
+; SDAG-LABEL: fcmp_ogt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_ogt2
+; FAST: xorl %eax, %eax
+ %1 = fcmp ogt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt3(float %x) {
+; SDAG-LABEL: fcmp_ogt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_ogt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: seta %al
+ %1 = fcmp ogt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge2(float %x) {
+; SDAG-LABEL: fcmp_oge2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_oge2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp oge float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge3(float %x) {
+; SDAG-LABEL: fcmp_oge3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_oge3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setae %al
+ %1 = fcmp oge float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt2(float %x) {
+; SDAG-LABEL: fcmp_olt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_olt2
+; FAST: xorl %eax, %eax
+ %1 = fcmp olt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt3(float %x) {
+; SDAG-LABEL: fcmp_olt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_olt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: seta %al
+ %1 = fcmp olt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole2(float %x) {
+; SDAG-LABEL: fcmp_ole2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ole2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ole float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole3(float %x) {
+; SDAG-LABEL: fcmp_ole3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_ole3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setae %al
+ %1 = fcmp ole float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one2(float %x) {
+; SDAG-LABEL: fcmp_one2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_one2
+; FAST: xorl %eax, %eax
+ %1 = fcmp one float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one3(float %x) {
+; SDAG-LABEL: fcmp_one3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setne %al
+; FAST-LABEL: fcmp_one3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+ %1 = fcmp one float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord2(float %x) {
+; SDAG-LABEL: fcmp_ord2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord3(float %x) {
+; SDAG-LABEL: fcmp_ord3
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord3
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno2(float %x) {
+; SDAG-LABEL: fcmp_uno2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno3(float %x) {
+; SDAG-LABEL: fcmp_uno3
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno3
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq2(float %x) {
+; SDAG-LABEL: fcmp_ueq2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_ueq2
+; FAST: movb $1, %al
+ %1 = fcmp ueq float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq3(float %x) {
+; SDAG-LABEL: fcmp_ueq3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: sete %al
+; FAST-LABEL: fcmp_ueq3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+ %1 = fcmp ueq float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt2(float %x) {
+; SDAG-LABEL: fcmp_ugt2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_ugt2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp ugt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt3(float %x) {
+; SDAG-LABEL: fcmp_ugt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ugt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setb %al
+ %1 = fcmp ugt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge2(float %x) {
+; SDAG-LABEL: fcmp_uge2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_uge2
+; FAST: movb $1, %al
+ %1 = fcmp uge float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge3(float %x) {
+; SDAG-LABEL: fcmp_uge3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_uge3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setbe %al
+ %1 = fcmp uge float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult2(float %x) {
+; SDAG-LABEL: fcmp_ult2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_ult2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp ult float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult3(float %x) {
+; SDAG-LABEL: fcmp_ult3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ult3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setb %al
+ %1 = fcmp ult float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule2(float %x) {
+; SDAG-LABEL: fcmp_ule2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_ule2
+; FAST: movb $1, %al
+ %1 = fcmp ule float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule3(float %x) {
+; SDAG-LABEL: fcmp_ule3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_ule3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setbe %al
+ %1 = fcmp ule float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une2(float %x) {
+; SDAG-LABEL: fcmp_une2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_une2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp une float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une3(float %x) {
+; SDAG-LABEL: fcmp_une3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: cmpneqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_une3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: setp %cl
+; FAST-NEXT: orb %al, %cl
+ %1 = fcmp une float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_eq2(i32 %x) {
+; SDAG-LABEL: icmp_eq2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_eq2
+; FAST: movb $1, %al
+ %1 = icmp eq i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ne2(i32 %x) {
+; SDAG-LABEL: icmp_ne2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ne2
+; FAST: xorl %eax, %eax
+ %1 = icmp ne i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ugt2(i32 %x) {
+; SDAG-LABEL: icmp_ugt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ugt2
+; FAST: xorl %eax, %eax
+ %1 = icmp ugt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_uge2(i32 %x) {
+; SDAG-LABEL: icmp_uge2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_uge2
+; FAST: movb $1, %al
+ %1 = icmp uge i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ult2(i32 %x) {
+; SDAG-LABEL: icmp_ult2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ult2
+; FAST: xorl %eax, %eax
+ %1 = icmp ult i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ule2(i32 %x) {
+; SDAG-LABEL: icmp_ule2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_ule2
+; FAST: movb $1, %al
+ %1 = icmp ule i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sgt2(i32 %x) {
+; SDAG-LABEL: icmp_sgt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_sgt2
+; FAST: xorl %eax, %eax
+ %1 = icmp sgt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sge2(i32 %x) {
+; SDAG-LABEL: icmp_sge2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_sge2
+; FAST: movb $1, %al
+ %1 = icmp sge i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_slt2(i32 %x) {
+; SDAG-LABEL: icmp_slt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_slt2
+; FAST: xorl %eax, %eax
+ %1 = icmp slt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sle2(i32 %x) {
+; SDAG-LABEL: icmp_sle2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_sle2
+; FAST: movb $1, %al
+ %1 = icmp sle i32 %x, %x
+ ret i1 %1
+}
+
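The *2/*3 variants above exercise two separate folds: comparing a value against itself lets each predicate reduce to either a bare NaN test (setnp or setp on a ucomiss of the register with itself) or a constant, and comparing against +0.0 lets the zero be materialized with xorps rather than a load. A minimal sketch of the constant side (function name hypothetical):

define zeroext i1 @self_olt_sketch(float %x) {
; x < x is false for every input, NaN included, so no ucomiss should be
; emitted; the expected lowering is just "xorl %eax, %eax".
  %1 = fcmp olt float %x, %x
  ret i1 %1
}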
diff --git a/test/CodeGen/X86/fast-isel-fold-mem.ll b/test/CodeGen/X86/fast-isel-fold-mem.ll
new file mode 100644
index 0000000..a945779
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-fold-mem.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin | FileCheck %s
+
+define i64 @fold_load(i64* %a, i64 %b) {
+; CHECK-LABEL: fold_load
+; CHECK: addq (%rdi), %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = load i64* %a, align 8
+ %2 = add i64 %1, %b
+ ret i64 %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmov.ll b/test/CodeGen/X86/fast-isel-select-cmov.ll
new file mode 100644
index 0000000..8008e28
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmov.ll
@@ -0,0 +1,62 @@
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test conditional move for the supported types (i16, i32, and i64) and
+; condition input (argument or cmp). Currently i8 is not supported.
+
+define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: select_cmov_i16
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmovew %dx, %si
+; CHECK-NEXT: movzwl %si, %eax
+ %1 = select i1 %cond, i16 %a, i16 %b
+ ret i16 %1
+}
+
+define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: select_cmp_cmov_i16
+; CHECK: cmpw %si, %di
+; CHECK-NEXT: cmovbw %di, %si
+; CHECK-NEXT: movzwl %si, %eax
+ %1 = icmp ult i16 %a, %b
+ %2 = select i1 %1, i16 %a, i16 %b
+ ret i16 %2
+}
+
+define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmov_i32
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmovel %edx, %esi
+; CHECK-NEXT: movl %esi, %eax
+ %1 = select i1 %cond, i32 %a, i32 %b
+ ret i32 %1
+}
+
+define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmp_cmov_i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovbl %edi, %esi
+; CHECK-NEXT: movl %esi, %eax
+ %1 = icmp ult i32 %a, %b
+ %2 = select i1 %1, i32 %a, i32 %b
+ ret i32 %2
+}
+
+define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
+; CHECK-LABEL: select_cmov_i64
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmoveq %rdx, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = select i1 %cond, i64 %a, i64 %b
+ ret i64 %1
+}
+
+define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: select_cmp_cmov_i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rdi, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, i64 %a, i64 %b
+ ret i64 %2
+}
+
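Two lowering shapes are covered above: a select on an incoming i1 tests bit 0 of the argument register (testb $1) and then issues a cmov, while a select fed directly by an icmp in the same block folds the compare into the cmov condition and never materializes the i1. A minimal sketch of the folded form (function name hypothetical):

define i64 @smax_sketch(i64 %a, i64 %b) {
; The icmp should fold into the cmov condition: expect a cmpq followed
; directly by a signed-condition cmovq, with no setcc in between.
  %cmp = icmp sgt i64 %a, %b
  %max = select i1 %cmp, i64 %a, i64 %b
  ret i64 %max
}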
diff --git a/test/CodeGen/X86/fast-isel-select-cmov2.ll b/test/CodeGen/X86/fast-isel-select-cmov2.ll
new file mode 100644
index 0000000..658098f
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmov2.ll
@@ -0,0 +1,255 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+
+; Test all the cmp predicates that can feed an integer conditional move.
+
+define i64 @select_fcmp_false_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_false_cmov
+; CHECK: movq %rsi, %rax
+; CHECK-NEXT: retq
+ %1 = fcmp false double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_oeq_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: setnp %al
+; CHECK-NEXT: sete %cl
+; CHECK-NEXT: testb %al, %cl
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp oeq double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ogt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ogt_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovbeq %rsi, %rdi
+ %1 = fcmp ogt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_oge_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_oge_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovbq %rsi, %rdi
+ %1 = fcmp oge double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_olt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_olt_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovbeq %rsi, %rdi
+ %1 = fcmp olt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ole_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ole_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovbq %rsi, %rdi
+ %1 = fcmp ole double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_one_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_one_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp one double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ord_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ord_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovpq %rsi, %rdi
+ %1 = fcmp ord double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_uno_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_uno_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovnpq %rsi, %rdi
+ %1 = fcmp uno double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ueq_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ueq_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovneq %rsi, %rdi
+ %1 = fcmp ueq double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ugt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ugt_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovaeq %rsi, %rdi
+ %1 = fcmp ugt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_uge_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_uge_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovaq %rsi, %rdi
+ %1 = fcmp uge double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ult_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ult_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovaeq %rsi, %rdi
+ %1 = fcmp ult double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ule_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ule_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovaq %rsi, %rdi
+ %1 = fcmp ule double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_une_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: setp %al
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: orb %al, %cl
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp une double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_true_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_true_cmov
+; CHECK: movq %rdi, %rax
+ %1 = fcmp true double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_eq_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_eq_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovneq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp eq i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ne_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ne_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmoveq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ne i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ugt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ugt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ugt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_uge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_uge_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp uge i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ult_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ult_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovaeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ule_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ule_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovaq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ule i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sgt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sgt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovleq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sgt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sge_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovlq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sge i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_slt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_slt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovgeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp slt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sle_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sle_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovgq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sle i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmp.ll b/test/CodeGen/X86/fast-isel-select-cmp.ll
new file mode 100644
index 0000000..1af30e9
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmp.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test that we do not fold the cmp into the select when the instructions
+; are in different basic blocks.
+
+define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmp_cmov_i32
+; CHECK-LABEL: continue
+; CHECK-NOT: cmp
+ %1 = icmp ult i32 %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, i32 %a, i32 %b
+ ret i32 %2
+
+exit:
+ ret i32 -1
+}
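+
+; With the compare in another block, the i1 result is materialized into a
+; register and the select only tests that bit (typically testb $1), which is
+; why no compare instruction may appear in the 'continue' block.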
+
+define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oeq_f32
+; CHECK-LABEL: continue
+; CHECK-NOT: cmp
+ %1 = fcmp oeq float %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+
+exit:
+ ret float -1.0
+}
+
+define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_one_f32
+; CHECK-LABEL: continue
+; CHECK-NOT: ucomi
+ %1 = fcmp one float %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+
+exit:
+ ret float -1.0
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll b/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
new file mode 100644
index 0000000..1ec4d64
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
@@ -0,0 +1,138 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort -mcpu=corei7-avx | FileCheck %s
+
+
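+; There is no cmov for FP values in XMM registers, so these selects go
+; through a CMOV pseudo-instruction that is expanded into a branch diamond;
+; the checks below verify the conditional jump and the copy in the join
+; block.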
+define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_one_f32
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: movaps %xmm2, %xmm0
+ %1 = fcmp one float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_one_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_one_f64
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: movaps %xmm2, %xmm0
+ %1 = fcmp one double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_icmp_eq_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_eq_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: je [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp eq i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ne_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ne_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ne i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ugt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ugt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: ja [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ugt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_uge_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_uge_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jae [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp uge i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ult_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ult_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jb [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ule_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ule_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jbe [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ule i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sgt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sgt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jg [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sgt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sge_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sge_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jge [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sge i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_slt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_slt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jl [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp slt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sle_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sle_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jle [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sle i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-sse.ll b/test/CodeGen/X86/fast-isel-select-sse.ll
new file mode 100644
index 0000000..3c03a03
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-sse.ll
@@ -0,0 +1,391 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
+
+; Test all cmp predicates that can be used with SSE.
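+; Without cmov for XMM values, the compare result is kept as an all-ones /
+; all-zeros mask (cmpss/cmpsd) and the select turns into bit arithmetic:
+;   res = (mask & c) | (~mask & d)
+; i.e. the cmp + andps + andnps + orps sequences checked below.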
+
+define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oeq_f32
+; CHECK: cmpeqss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_oeq_f32
+; AVX: vcmpeqss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp oeq float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_oeq_f64
+; CHECK: cmpeqsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_oeq_f64
+; AVX: vcmpeqsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp oeq double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ogt_f32
+; CHECK: cmpltss %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ogt_f32
+; AVX: vcmpltss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ogt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ogt_f64
+; CHECK: cmpltsd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ogt_f64
+; AVX: vcmpltsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ogt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
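+
+; The SSE compare predicates only provide lt/le (and their negations
+; nlt/nle) among the orderings, so "greater" compares swap the operands:
+; a > b is emitted as cmpltss %xmm0, %xmm1 (b < a), leaving the mask in
+; %xmm1. The same commutation shows up below for ult/ule via
+; cmpnless/cmpnltss with swapped operands.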
+
+define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oge_f32
+; CHECK: cmpless %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_oge_f32
+; AVX: vcmpless %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp oge float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_oge_f64
+; CHECK: cmplesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_oge_f64
+; AVX: vcmplesd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp oge double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_olt_f32
+; CHECK: cmpltss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_olt_f32
+; AVX: vcmpltss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp olt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_olt_f64
+; CHECK: cmpltsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_olt_f64
+; AVX: vcmpltsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp olt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ole_f32
+; CHECK: cmpless %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ole_f32
+; AVX: vcmpless %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ole float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ole_f64
+; CHECK: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ole_f64
+; AVX: vcmplesd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ole double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ord_f32
+; CHECK: cmpordss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ord_f32
+; AVX: vcmpordss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ord float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ord_f64
+; CHECK: cmpordsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ord_f64
+; AVX: vcmpordsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ord double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_uno_f32
+; CHECK: cmpunordss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uno_f32
+; AVX: vcmpunordss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp uno float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_uno_f64
+; CHECK: cmpunordsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uno_f64
+; AVX: vcmpunordsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp uno double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ugt_f32
+; CHECK: cmpnless %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ugt_f32
+; AVX: vcmpnless %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ugt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ugt_f64
+; CHECK: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ugt_f64
+; AVX: vcmpnlesd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ugt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_uge_f32
+; CHECK: cmpnltss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uge_f32
+; AVX: vcmpnltss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp uge float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_uge_f64
+; CHECK: cmpnltsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uge_f64
+; AVX: vcmpnltsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp uge double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ult_f32
+; CHECK: cmpnless %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ult_f32
+; AVX: vcmpnless %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ult float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ult_f64
+; CHECK: cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ult_f64
+; AVX: vcmpnlesd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ult double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ule_f32
+; CHECK: cmpnltss %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ule_f32
+; AVX: vcmpnltss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ule float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ule_f64
+; CHECK: cmpnltsd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ule_f64
+; AVX: vcmpnltsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ule double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_une_f32
+; CHECK: cmpneqss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_une_f32
+; AVX: vcmpneqss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp une float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_une_f64
+; CHECK: cmpneqsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_une_f64
+; AVX: vcmpneqsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp une double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select.ll b/test/CodeGen/X86/fast-isel-select.ll
index 53158bc..7b3c99f 100644
--- a/test/CodeGen/X86/fast-isel-select.ll
+++ b/test/CodeGen/X86/fast-isel-select.ll
@@ -4,10 +4,10 @@
; lsb is zero.
; <rdar://problem/15651765>
-; CHECK-LABEL: fastisel_select:
+; CHECK-LABEL: fastisel_select:
; CHECK: subb {{%[a-z0-9]+}}, [[RES:%[a-z0-9]+]]
; CHECK: testb $1, [[RES]]
-; CHECK: cmovel
+; CHECK: cmovnel %edi, %esi
define i32 @fastisel_select(i1 %exchSub2211_, i1 %trunc_8766) {
%shuffleInternal15257_8932 = sub i1 %exchSub2211_, %trunc_8766
%counter_diff1345 = select i1 %shuffleInternal15257_8932, i32 1204476887, i32 0
diff --git a/test/CodeGen/X86/fast-isel-sse12-fptoint.ll b/test/CodeGen/X86/fast-isel-sse12-fptoint.ll
new file mode 100644
index 0000000..769c987
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-sse12-fptoint.ll
@@ -0,0 +1,54 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=AVX
+
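+; These intrinsics read only element 0 of their vector operand, so the
+; scalar cvttss2si/cvttsd2si can be selected directly; the insertelement
+; chains merely build up the vector argument.
+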
+define i32 @cvt_test1(float %a) {
+; SSE-LABEL: cvt_test1
+; SSE: cvttss2si %xmm0, %eax
+; AVX-LABEL: cvt_test1
+; AVX: vcvttss2si %xmm0, %eax
+ %1 = insertelement <4 x float> undef, float %a, i32 0
+ %2 = insertelement <4 x float> %1, float 0.000000e+00, i32 1
+ %3 = insertelement <4 x float> %2, float 0.000000e+00, i32 2
+ %4 = insertelement <4 x float> %3, float 0.000000e+00, i32 3
+ %5 = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %4)
+ ret i32 %5
+}
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
+
+define i64 @cvt_test2(float %a) {
+; SSE-LABEL: cvt_test2
+; SSE: cvttss2si %xmm0, %rax
+; AVX-LABEL: cvt_test2
+; AVX: vcvttss2si %xmm0, %rax
+ %1 = insertelement <4 x float> undef, float %a, i32 0
+ %2 = insertelement <4 x float> %1, float 0.000000e+00, i32 1
+ %3 = insertelement <4 x float> %2, float 0.000000e+00, i32 2
+ %4 = insertelement <4 x float> %3, float 0.000000e+00, i32 3
+ %5 = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %4)
+ ret i64 %5
+}
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
+
+define i32 @cvt_test3(double %a) {
+; SSE-LABEL: cvt_test3
+; SSE: cvttsd2si %xmm0, %eax
+; AVX-LABEL: cvt_test3
+; AVX: vcvttsd2si %xmm0, %eax
+ %1 = insertelement <2 x double> undef, double %a, i32 0
+ %2 = insertelement <2 x double> %1, double 0.000000e+00, i32 1
+ %3 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %2)
+ ret i32 %3
+}
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
+
+define i64 @cvt_test4(double %a) {
+; SSE-LABEL: cvt_test4
+; SSE: cvttsd2si %xmm0, %rax
+; AVX-LABEL: cvt_test4
+; AVX: vcvttsd2si %xmm0, %rax
+ %1 = insertelement <2 x double> undef, double %a, i32 0
+ %2 = insertelement <2 x double> %1, double 0.000000e+00, i32 1
+ %3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %2)
+ ret i64 %3
+}
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
diff --git a/test/CodeGen/X86/float-asmprint.ll b/test/CodeGen/X86/float-asmprint.ll
index 4aeae7f..5de9700 100644
--- a/test/CodeGen/X86/float-asmprint.ll
+++ b/test/CodeGen/X86/float-asmprint.ll
@@ -16,8 +16,9 @@
; CHECK-NEXT: .size
; CHECK: varppc128:
-; CHECK-NEXT: .quad 0 # ppc_fp128 -0
-; CHECK-NEXT: .quad -9223372036854775808
+; For ppc_fp128, the high double always comes first.
+; CHECK-NEXT: .quad -9223372036854775808 # ppc_fp128 -0
+; CHECK-NEXT: .quad 0
; CHECK-NEXT: .size
; CHECK: var80:
diff --git a/test/CodeGen/X86/frameaddr.ll b/test/CodeGen/X86/frameaddr.ll
new file mode 100644
index 0000000..6c1ca25
--- /dev/null
+++ b/test/CodeGen/X86/frameaddr.ll
@@ -0,0 +1,44 @@
+; RUN: llc < %s -march=x86 | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -march=x86 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -march=x86-64 | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -march=x86-64 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-64
+
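+; @llvm.frameaddress(i32 0) just returns the frame pointer; each additional
+; level walks the chain of saved frame pointers with one load, hence the two
+; loads for depth 2 in test2.
+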
+define i8* @test1() nounwind {
+entry:
+; CHECK-32-LABEL: test1
+; CHECK-32: push
+; CHECK-32-NEXT: movl %esp, %ebp
+; CHECK-32-NEXT: movl %ebp, %eax
+; CHECK-32-NEXT: pop
+; CHECK-32-NEXT: ret
+; CHECK-64-LABEL: test1
+; CHECK-64: push
+; CHECK-64-NEXT: movq %rsp, %rbp
+; CHECK-64-NEXT: movq %rbp, %rax
+; CHECK-64-NEXT: pop
+; CHECK-64-NEXT: ret
+ %0 = tail call i8* @llvm.frameaddress(i32 0)
+ ret i8* %0
+}
+
+define i8* @test2() nounwind {
+entry:
+; CHECK-32-LABEL: test2
+; CHECK-32: push
+; CHECK-32-NEXT: movl %esp, %ebp
+; CHECK-32-NEXT: movl (%ebp), %eax
+; CHECK-32-NEXT: movl (%eax), %eax
+; CHECK-32-NEXT: pop
+; CHECK-32-NEXT: ret
+; CHECK-64-LABEL: test2
+; CHECK-64: push
+; CHECK-64-NEXT: movq %rsp, %rbp
+; CHECK-64-NEXT: movq (%rbp), %rax
+; CHECK-64-NEXT: movq (%rax), %rax
+; CHECK-64-NEXT: pop
+; CHECK-64-NEXT: ret
+ %0 = tail call i8* @llvm.frameaddress(i32 2)
+ ret i8* %0
+}
+
+declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/gcc_except_table.ll b/test/CodeGen/X86/gcc_except_table.ll
index 8c328ec..a732eb1 100644
--- a/test/CodeGen/X86/gcc_except_table.ll
+++ b/test/CodeGen/X86/gcc_except_table.ll
@@ -13,14 +13,14 @@ define i32 @main() uwtable optsize ssp {
; APPLE: GCC_except_table0:
; APPLE: Lexception0:
-; MINGW64: .cfi_startproc
-; MINGW64: .cfi_personality 0, __gxx_personality_v0
-; MINGW64: .cfi_lsda 0, .Lexception0
-; MINGW64: .cfi_def_cfa_offset 16
+; MINGW64: .seh_proc
+; MINGW64: .seh_handler __gxx_personality_v0
+; MINGW64: .seh_setframe 5, 0
; MINGW64: callq _Unwind_Resume
-; MINGW64: .cfi_endproc
+; MINGW64: .seh_handlerdata
; MINGW64: GCC_except_table0:
; MINGW64: Lexception0:
+; MINGW64: .seh_endproc
; MINGW32: .cfi_startproc
; MINGW32: .cfi_personality 0, ___gxx_personality_v0
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
new file mode 100644
index 0000000..ff939a9
--- /dev/null
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -0,0 +1,802 @@
+; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE3
+; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3,+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSSE3
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+
+
+define <4 x float> @hadd_ps_test1(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecext1 = extractelement <4 x float> %A, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %A, i32 2
+ %vecext3 = extractelement <4 x float> %A, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <4 x float> %B, i32 0
+ %vecext7 = extractelement <4 x float> %B, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 2
+ %vecext10 = extractelement <4 x float> %B, i32 2
+ %vecext11 = extractelement <4 x float> %B, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hadd_ps_test1
+; CHECK: haddps
+; CHECK-NEXT: ret
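+
+; In the hadd/hsub pattern, result element i is the sum (difference) of
+; input elements 2i and 2i+1, with the low half of the result taken from
+; the first operand and the high half from the second; the order in which
+; the elements are inserted does not matter, as hadd_ps_test2 shows.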
+
+
+define <4 x float> @hadd_ps_test2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 2
+ %vecext7 = extractelement <4 x float> %B, i32 3
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hadd_ps_test2
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hsub_ps_test1(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecext1 = extractelement <4 x float> %A, i32 1
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 0
+ %vecext2 = extractelement <4 x float> %A, i32 2
+ %vecext3 = extractelement <4 x float> %A, i32 3
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 1
+ %vecext6 = extractelement <4 x float> %B, i32 0
+ %vecext7 = extractelement <4 x float> %B, i32 1
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 2
+ %vecext10 = extractelement <4 x float> %B, i32 2
+ %vecext11 = extractelement <4 x float> %B, i32 3
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hsub_ps_test1
+; CHECK: hsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hsub_ps_test2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 2
+ %vecext7 = extractelement <4 x float> %B, i32 3
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hsub_ps_test2
+; CHECK: hsubps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 0
+ %vecext7 = extractelement <4 x i32> %B, i32 1
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 2
+ %vecext11 = extractelement <4 x i32> %B, i32 3
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phadd_d_test1
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+
+define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 2
+ %vecext1 = extractelement <4 x i32> %A, i32 3
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %add, i32 1
+ %vecext2 = extractelement <4 x i32> %A, i32 0
+ %vecext3 = extractelement <4 x i32> %A, i32 1
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %add4, i32 0
+ %vecext6 = extractelement <4 x i32> %B, i32 3
+ %vecext7 = extractelement <4 x i32> %B, i32 2
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %add8, i32 3
+ %vecext10 = extractelement <4 x i32> %B, i32 1
+ %vecext11 = extractelement <4 x i32> %B, i32 0
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 2
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phadd_d_test2
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+
+define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 0
+ %vecext7 = extractelement <4 x i32> %B, i32 1
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 2
+ %vecext11 = extractelement <4 x i32> %B, i32 3
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phsub_d_test1
+; SSE3-NOT: phsubd
+; SSSE3: phsubd
+; AVX: vphsubd
+; AVX2: vphsubd
+; CHECK: ret
+
+
+define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 2
+ %vecext1 = extractelement <4 x i32> %A, i32 3
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 1
+ %vecext2 = extractelement <4 x i32> %A, i32 0
+ %vecext3 = extractelement <4 x i32> %A, i32 1
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 0
+ %vecext6 = extractelement <4 x i32> %B, i32 2
+ %vecext7 = extractelement <4 x i32> %B, i32 3
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 3
+ %vecext10 = extractelement <4 x i32> %B, i32 0
+ %vecext11 = extractelement <4 x i32> %B, i32 1
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 2
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phsub_d_test2
+; SSE3-NOT: phsubd
+; SSSE3: phsubd
+; AVX: vphsubd
+; AVX2: vphsubd
+; CHECK: ret
+
+
+define <2 x double> @hadd_pd_test1(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 0
+ %vecext1 = extractelement <2 x double> %A, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 0
+ %vecext3 = extractelement <2 x double> %B, i32 1
+ %add2 = fadd double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hadd_pd_test1
+; CHECK: haddpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hadd_pd_test2(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 1
+ %vecext1 = extractelement <2 x double> %A, i32 0
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 1
+ %vecext3 = extractelement <2 x double> %B, i32 0
+ %add2 = fadd double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hadd_pd_test2
+; CHECK: haddpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hsub_pd_test1(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 0
+ %vecext1 = extractelement <2 x double> %A, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 0
+ %vecext3 = extractelement <2 x double> %B, i32 1
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hsub_pd_test1
+; CHECK: hsubpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hsub_pd_test2(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %B, i32 0
+ %vecext1 = extractelement <2 x double> %B, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 1
+ %vecext2 = extractelement <2 x double> %A, i32 0
+ %vecext3 = extractelement <2 x double> %A, i32 1
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 0
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hsub_pd_test2
+; CHECK: hsubpd
+; CHECK-NEXT: ret
+
+
+define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
+ %vecext = extractelement <4 x double> %A, i32 0
+ %vecext1 = extractelement <4 x double> %A, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <4 x double> %A, i32 2
+ %vecext3 = extractelement <4 x double> %A, i32 3
+ %add4 = fadd double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %add4, i32 1
+ %vecext6 = extractelement <4 x double> %B, i32 0
+ %vecext7 = extractelement <4 x double> %B, i32 1
+ %add8 = fadd double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %add8, i32 2
+ %vecext10 = extractelement <4 x double> %B, i32 2
+ %vecext11 = extractelement <4 x double> %B, i32 3
+ %add12 = fadd double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_vhadd_pd_test
+; SSE3: haddpd
+; SSE3-NEXT: haddpd
+; SSSE3: haddpd
+; SSSE3: haddpd
+; AVX: vhaddpd
+; AVX: vhaddpd
+; AVX2: vhaddpd
+; AVX2: vhaddpd
+; CHECK: ret
+
+
+define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
+ %vecext = extractelement <4 x double> %A, i32 0
+ %vecext1 = extractelement <4 x double> %A, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <4 x double> %A, i32 2
+ %vecext3 = extractelement <4 x double> %A, i32 3
+ %sub4 = fsub double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %sub4, i32 1
+ %vecext6 = extractelement <4 x double> %B, i32 0
+ %vecext7 = extractelement <4 x double> %B, i32 1
+ %sub8 = fsub double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %sub8, i32 2
+ %vecext10 = extractelement <4 x double> %B, i32 2
+ %vecext11 = extractelement <4 x double> %B, i32 3
+ %sub12 = fsub double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_vhsub_pd_test
+; SSE3: hsubpd
+; SSE3-NEXT: hsubpd
+; SSSE3: hsubpd
+; SSSE3-NEXT: hsubpd
+; AVX: vhsubpd
+; AVX: vhsubpd
+; AVX2: vhsubpd
+; AVX2: vhsubpd
+; CHECK: ret
+
+
+define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
+ %vecext = extractelement <8 x i32> %A, i32 0
+ %vecext1 = extractelement <8 x i32> %A, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %A, i32 2
+ %vecext3 = extractelement <8 x i32> %A, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <8 x i32> %A, i32 4
+ %vecext7 = extractelement <8 x i32> %A, i32 5
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <8 x i32> %A, i32 6
+ %vecext11 = extractelement <8 x i32> %A, i32 7
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x i32> %vecinit9, i32 %add12, i32 3
+ %vecext14 = extractelement <8 x i32> %B, i32 0
+ %vecext15 = extractelement <8 x i32> %B, i32 1
+ %add16 = add i32 %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x i32> %vecinit13, i32 %add16, i32 4
+ %vecext18 = extractelement <8 x i32> %B, i32 2
+ %vecext19 = extractelement <8 x i32> %B, i32 3
+ %add20 = add i32 %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x i32> %vecinit17, i32 %add20, i32 5
+ %vecext22 = extractelement <8 x i32> %B, i32 4
+ %vecext23 = extractelement <8 x i32> %B, i32 5
+ %add24 = add i32 %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x i32> %vecinit21, i32 %add24, i32 6
+ %vecext26 = extractelement <8 x i32> %B, i32 6
+ %vecext27 = extractelement <8 x i32> %B, i32 7
+ %add28 = add i32 %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
+ ret <8 x i32> %vecinit29
+}
+; CHECK-LABEL: avx2_vphadd_d_test
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; SSSE3-NEXT: phaddd
+; AVX: vphaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
+ %vecext = extractelement <16 x i16> %a, i32 0
+ %vecext1 = extractelement <16 x i16> %a, i32 1
+ %add = add i16 %vecext, %vecext1
+ %vecinit = insertelement <16 x i16> undef, i16 %add, i32 0
+ %vecext4 = extractelement <16 x i16> %a, i32 2
+ %vecext6 = extractelement <16 x i16> %a, i32 3
+ %add8 = add i16 %vecext4, %vecext6
+ %vecinit10 = insertelement <16 x i16> %vecinit, i16 %add8, i32 1
+ %vecext11 = extractelement <16 x i16> %a, i32 4
+ %vecext13 = extractelement <16 x i16> %a, i32 5
+ %add15 = add i16 %vecext11, %vecext13
+ %vecinit17 = insertelement <16 x i16> %vecinit10, i16 %add15, i32 2
+ %vecext18 = extractelement <16 x i16> %a, i32 6
+ %vecext20 = extractelement <16 x i16> %a, i32 7
+ %add22 = add i16 %vecext18, %vecext20
+ %vecinit24 = insertelement <16 x i16> %vecinit17, i16 %add22, i32 3
+ %vecext25 = extractelement <16 x i16> %a, i32 8
+ %vecext27 = extractelement <16 x i16> %a, i32 9
+ %add29 = add i16 %vecext25, %vecext27
+ %vecinit31 = insertelement <16 x i16> %vecinit24, i16 %add29, i32 4
+ %vecext32 = extractelement <16 x i16> %a, i32 10
+ %vecext34 = extractelement <16 x i16> %a, i32 11
+ %add36 = add i16 %vecext32, %vecext34
+ %vecinit38 = insertelement <16 x i16> %vecinit31, i16 %add36, i32 5
+ %vecext39 = extractelement <16 x i16> %a, i32 12
+ %vecext41 = extractelement <16 x i16> %a, i32 13
+ %add43 = add i16 %vecext39, %vecext41
+ %vecinit45 = insertelement <16 x i16> %vecinit38, i16 %add43, i32 6
+ %vecext46 = extractelement <16 x i16> %a, i32 14
+ %vecext48 = extractelement <16 x i16> %a, i32 15
+ %add50 = add i16 %vecext46, %vecext48
+ %vecinit52 = insertelement <16 x i16> %vecinit45, i16 %add50, i32 7
+ %vecext53 = extractelement <16 x i16> %b, i32 0
+ %vecext55 = extractelement <16 x i16> %b, i32 1
+ %add57 = add i16 %vecext53, %vecext55
+ %vecinit59 = insertelement <16 x i16> %vecinit52, i16 %add57, i32 8
+ %vecext60 = extractelement <16 x i16> %b, i32 2
+ %vecext62 = extractelement <16 x i16> %b, i32 3
+ %add64 = add i16 %vecext60, %vecext62
+ %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 9
+ %vecext67 = extractelement <16 x i16> %b, i32 4
+ %vecext69 = extractelement <16 x i16> %b, i32 5
+ %add71 = add i16 %vecext67, %vecext69
+ %vecinit73 = insertelement <16 x i16> %vecinit66, i16 %add71, i32 10
+ %vecext74 = extractelement <16 x i16> %b, i32 6
+ %vecext76 = extractelement <16 x i16> %b, i32 7
+ %add78 = add i16 %vecext74, %vecext76
+ %vecinit80 = insertelement <16 x i16> %vecinit73, i16 %add78, i32 11
+ %vecext81 = extractelement <16 x i16> %b, i32 8
+ %vecext83 = extractelement <16 x i16> %b, i32 9
+ %add85 = add i16 %vecext81, %vecext83
+ %vecinit87 = insertelement <16 x i16> %vecinit80, i16 %add85, i32 12
+ %vecext88 = extractelement <16 x i16> %b, i32 10
+ %vecext90 = extractelement <16 x i16> %b, i32 11
+ %add92 = add i16 %vecext88, %vecext90
+ %vecinit94 = insertelement <16 x i16> %vecinit87, i16 %add92, i32 13
+ %vecext95 = extractelement <16 x i16> %b, i32 12
+ %vecext97 = extractelement <16 x i16> %b, i32 13
+ %add99 = add i16 %vecext95, %vecext97
+ %vecinit101 = insertelement <16 x i16> %vecinit94, i16 %add99, i32 14
+ %vecext102 = extractelement <16 x i16> %b, i32 14
+ %vecext104 = extractelement <16 x i16> %b, i32 15
+ %add106 = add i16 %vecext102, %vecext104
+ %vecinit108 = insertelement <16 x i16> %vecinit101, i16 %add106, i32 15
+ ret <16 x i16> %vecinit108
+}
+; CHECK-LABEL: avx2_vphadd_w_test
+; SSE3-NOT: phaddw
+; SSSE3: phaddw
+; SSSE3-NEXT: phaddw
+; AVX: vphaddw
+; AVX: vphaddw
+; AVX2: vphaddw
+; AVX2: vphaddw
+; CHECK: ret
+
+
+; Verify that we don't select horizontal subs in the following functions.
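+; Each of them subtracts at least one pair in the wrong order (for example
+; B[1]-B[0] instead of B[0]-B[1]), while hsub strictly computes
+; element0 - element1 within every pair.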
+
+define <4 x i32> @not_a_hsub_1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 1
+ %vecext7 = extractelement <4 x i32> %B, i32 0
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 3
+ %vecext11 = extractelement <4 x i32> %B, i32 2
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: not_a_hsub_1
+; CHECK-NOT: phsubd
+; CHECK: ret
+
+
+define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 3
+ %vecext7 = extractelement <4 x float> %B, i32 2
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: not_a_hsub_2
+; CHECK-NOT: hsubps
+; CHECK: ret
+
+
+define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %B, i32 0
+ %vecext1 = extractelement <2 x double> %B, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 1
+ %vecext2 = extractelement <2 x double> %A, i32 1
+ %vecext3 = extractelement <2 x double> %A, i32 0
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 0
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: not_a_hsub_3
+; CHECK-NOT: hsubpd
+; CHECK: ret
+
+
+; Test AVX horizontal add/sub of packed single/double precision
+; floating point values from 256-bit vectors.
+
+define <8 x float> @avx_vhadd_ps(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <8 x float> %b, i32 0
+ %vecext7 = extractelement <8 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x float> %vecinit5, float %add8, i32 2
+ %vecext10 = extractelement <8 x float> %b, i32 2
+ %vecext11 = extractelement <8 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x float> %vecinit9, float %add12, i32 3
+ %vecext14 = extractelement <8 x float> %a, i32 4
+ %vecext15 = extractelement <8 x float> %a, i32 5
+ %add16 = fadd float %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x float> %vecinit13, float %add16, i32 4
+ %vecext18 = extractelement <8 x float> %a, i32 6
+ %vecext19 = extractelement <8 x float> %a, i32 7
+ %add20 = fadd float %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x float> %vecinit17, float %add20, i32 5
+ %vecext22 = extractelement <8 x float> %b, i32 4
+ %vecext23 = extractelement <8 x float> %b, i32 5
+ %add24 = fadd float %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x float> %vecinit21, float %add24, i32 6
+ %vecext26 = extractelement <8 x float> %b, i32 6
+ %vecext27 = extractelement <8 x float> %b, i32 7
+ %add28 = fadd float %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x float> %vecinit25, float %add28, i32 7
+ ret <8 x float> %vecinit29
+}
+; CHECK-LABEL: avx_vhadd_ps
+; SSE3: haddps
+; SSE3-NEXT: haddps
+; SSSE3: haddps
+; SSSE3-NEXT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK: ret
+
+
+define <8 x float> @avx_vhsub_ps(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %sub, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %sub4, i32 1
+ %vecext6 = extractelement <8 x float> %b, i32 0
+ %vecext7 = extractelement <8 x float> %b, i32 1
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x float> %vecinit5, float %sub8, i32 2
+ %vecext10 = extractelement <8 x float> %b, i32 2
+ %vecext11 = extractelement <8 x float> %b, i32 3
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x float> %vecinit9, float %sub12, i32 3
+ %vecext14 = extractelement <8 x float> %a, i32 4
+ %vecext15 = extractelement <8 x float> %a, i32 5
+ %sub16 = fsub float %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x float> %vecinit13, float %sub16, i32 4
+ %vecext18 = extractelement <8 x float> %a, i32 6
+ %vecext19 = extractelement <8 x float> %a, i32 7
+ %sub20 = fsub float %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x float> %vecinit17, float %sub20, i32 5
+ %vecext22 = extractelement <8 x float> %b, i32 4
+ %vecext23 = extractelement <8 x float> %b, i32 5
+ %sub24 = fsub float %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x float> %vecinit21, float %sub24, i32 6
+ %vecext26 = extractelement <8 x float> %b, i32 6
+ %vecext27 = extractelement <8 x float> %b, i32 7
+ %sub28 = fsub float %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x float> %vecinit25, float %sub28, i32 7
+ ret <8 x float> %vecinit29
+}
+; CHECK-LABEL: avx_vhsub_ps
+; SSE3: hsubps
+; SSE3-NEXT: hsubps
+; SSSE3: hsubps
+; SSSE3-NEXT: hsubps
+; AVX: vhsubps
+; AVX2: vhsubps
+; CHECK: ret
+
+
+define <4 x double> @avx_hadd_pd(<4 x double> %a, <4 x double> %b) {
+ %vecext = extractelement <4 x double> %a, i32 0
+ %vecext1 = extractelement <4 x double> %a, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <4 x double> %b, i32 0
+ %vecext3 = extractelement <4 x double> %b, i32 1
+ %add4 = fadd double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %add4, i32 1
+ %vecext6 = extractelement <4 x double> %a, i32 2
+ %vecext7 = extractelement <4 x double> %a, i32 3
+ %add8 = fadd double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %add8, i32 2
+ %vecext10 = extractelement <4 x double> %b, i32 2
+ %vecext11 = extractelement <4 x double> %b, i32 3
+ %add12 = fadd double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_hadd_pd
+; SSE3: haddpd
+; SSE3-NEXT: haddpd
+; SSSE3: haddpd
+; SSSE3-NEXT: haddpd
+; AVX: vhaddpd
+; AVX2: vhaddpd
+; CHECK: ret
+
+
+define <4 x double> @avx_hsub_pd(<4 x double> %a, <4 x double> %b) {
+ %vecext = extractelement <4 x double> %a, i32 0
+ %vecext1 = extractelement <4 x double> %a, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <4 x double> %b, i32 0
+ %vecext3 = extractelement <4 x double> %b, i32 1
+ %sub4 = fsub double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %sub4, i32 1
+ %vecext6 = extractelement <4 x double> %a, i32 2
+ %vecext7 = extractelement <4 x double> %a, i32 3
+ %sub8 = fsub double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %sub8, i32 2
+ %vecext10 = extractelement <4 x double> %b, i32 2
+ %vecext11 = extractelement <4 x double> %b, i32 3
+ %sub12 = fsub double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_hsub_pd
+; SSE3: hsubpd
+; SSE3-NEXT: hsubpd
+; SSSE3: hsubpd
+; SSSE3-NEXT: hsubpd
+; AVX: vhsubpd
+; AVX2: vhsubpd
+; CHECK: ret
+
+
+; Test AVX2 horizontal add of packed integer values from 256-bit vectors.
+
+define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <8 x i32> %b, i32 0
+ %vecext7 = extractelement <8 x i32> %b, i32 1
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <8 x i32> %b, i32 2
+ %vecext11 = extractelement <8 x i32> %b, i32 3
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x i32> %vecinit9, i32 %add12, i32 3
+ %vecext14 = extractelement <8 x i32> %a, i32 4
+ %vecext15 = extractelement <8 x i32> %a, i32 5
+ %add16 = add i32 %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x i32> %vecinit13, i32 %add16, i32 4
+ %vecext18 = extractelement <8 x i32> %a, i32 6
+ %vecext19 = extractelement <8 x i32> %a, i32 7
+ %add20 = add i32 %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x i32> %vecinit17, i32 %add20, i32 5
+ %vecext22 = extractelement <8 x i32> %b, i32 4
+ %vecext23 = extractelement <8 x i32> %b, i32 5
+ %add24 = add i32 %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x i32> %vecinit21, i32 %add24, i32 6
+ %vecext26 = extractelement <8 x i32> %b, i32 6
+ %vecext27 = extractelement <8 x i32> %b, i32 7
+ %add28 = add i32 %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
+ ret <8 x i32> %vecinit29
+}
+; CHECK-LABEL: avx2_hadd_d
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; SSSE3-NEXT: phaddd
+; AVX: vphaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; AVX2-NOT: vphaddd
+; CHECK: ret
+
+
+define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
+ %vecext = extractelement <16 x i16> %a, i32 0
+ %vecext1 = extractelement <16 x i16> %a, i32 1
+ %add = add i16 %vecext, %vecext1
+ %vecinit = insertelement <16 x i16> undef, i16 %add, i32 0
+ %vecext4 = extractelement <16 x i16> %a, i32 2
+ %vecext6 = extractelement <16 x i16> %a, i32 3
+ %add8 = add i16 %vecext4, %vecext6
+ %vecinit10 = insertelement <16 x i16> %vecinit, i16 %add8, i32 1
+ %vecext11 = extractelement <16 x i16> %a, i32 4
+ %vecext13 = extractelement <16 x i16> %a, i32 5
+ %add15 = add i16 %vecext11, %vecext13
+ %vecinit17 = insertelement <16 x i16> %vecinit10, i16 %add15, i32 2
+ %vecext18 = extractelement <16 x i16> %a, i32 6
+ %vecext20 = extractelement <16 x i16> %a, i32 7
+ %add22 = add i16 %vecext18, %vecext20
+ %vecinit24 = insertelement <16 x i16> %vecinit17, i16 %add22, i32 3
+ %vecext25 = extractelement <16 x i16> %a, i32 8
+ %vecext27 = extractelement <16 x i16> %a, i32 9
+ %add29 = add i16 %vecext25, %vecext27
+ %vecinit31 = insertelement <16 x i16> %vecinit24, i16 %add29, i32 8
+ %vecext32 = extractelement <16 x i16> %a, i32 10
+ %vecext34 = extractelement <16 x i16> %a, i32 11
+ %add36 = add i16 %vecext32, %vecext34
+ %vecinit38 = insertelement <16 x i16> %vecinit31, i16 %add36, i32 9
+ %vecext39 = extractelement <16 x i16> %a, i32 12
+ %vecext41 = extractelement <16 x i16> %a, i32 13
+ %add43 = add i16 %vecext39, %vecext41
+ %vecinit45 = insertelement <16 x i16> %vecinit38, i16 %add43, i32 10
+ %vecext46 = extractelement <16 x i16> %a, i32 14
+ %vecext48 = extractelement <16 x i16> %a, i32 15
+ %add50 = add i16 %vecext46, %vecext48
+ %vecinit52 = insertelement <16 x i16> %vecinit45, i16 %add50, i32 11
+ %vecext53 = extractelement <16 x i16> %b, i32 0
+ %vecext55 = extractelement <16 x i16> %b, i32 1
+ %add57 = add i16 %vecext53, %vecext55
+ %vecinit59 = insertelement <16 x i16> %vecinit52, i16 %add57, i32 4
+ %vecext60 = extractelement <16 x i16> %b, i32 2
+ %vecext62 = extractelement <16 x i16> %b, i32 3
+ %add64 = add i16 %vecext60, %vecext62
+ %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 5
+ %vecext67 = extractelement <16 x i16> %b, i32 4
+ %vecext69 = extractelement <16 x i16> %b, i32 5
+ %add71 = add i16 %vecext67, %vecext69
+ %vecinit73 = insertelement <16 x i16> %vecinit66, i16 %add71, i32 6
+ %vecext74 = extractelement <16 x i16> %b, i32 6
+ %vecext76 = extractelement <16 x i16> %b, i32 7
+ %add78 = add i16 %vecext74, %vecext76
+ %vecinit80 = insertelement <16 x i16> %vecinit73, i16 %add78, i32 7
+ %vecext81 = extractelement <16 x i16> %b, i32 8
+ %vecext83 = extractelement <16 x i16> %b, i32 9
+ %add85 = add i16 %vecext81, %vecext83
+ %vecinit87 = insertelement <16 x i16> %vecinit80, i16 %add85, i32 12
+ %vecext88 = extractelement <16 x i16> %b, i32 10
+ %vecext90 = extractelement <16 x i16> %b, i32 11
+ %add92 = add i16 %vecext88, %vecext90
+ %vecinit94 = insertelement <16 x i16> %vecinit87, i16 %add92, i32 13
+ %vecext95 = extractelement <16 x i16> %b, i32 12
+ %vecext97 = extractelement <16 x i16> %b, i32 13
+ %add99 = add i16 %vecext95, %vecext97
+ %vecinit101 = insertelement <16 x i16> %vecinit94, i16 %add99, i32 14
+ %vecext102 = extractelement <16 x i16> %b, i32 14
+ %vecext104 = extractelement <16 x i16> %b, i32 15
+ %add106 = add i16 %vecext102, %vecext104
+ %vecinit108 = insertelement <16 x i16> %vecinit101, i16 %add106, i32 15
+ ret <16 x i16> %vecinit108
+}
+; CHECK-LABEL: avx2_hadd_w
+; SSE3-NOT: phaddw
+; SSSE3: phaddw
+; SSSE3-NEXT: phaddw
+; AVX: vphaddw
+; AVX: vphaddw
+; AVX2: vphaddw
+; AVX2-NOT: vphaddw
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/haddsub-undef.ll b/test/CodeGen/X86/haddsub-undef.ll
new file mode 100644
index 0000000..954a9d9
--- /dev/null
+++ b/test/CodeGen/X86/haddsub-undef.ll
@@ -0,0 +1,325 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -mattr=+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+; Verify that we correctly fold horizontal binops even in the presence of UNDEFs.
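+; A note on the folds (a sketch, not checked directly): haddps computes
+; { a0+a1, a2+a3, b0+b1, b2+b3 }, so a build sequence that leaves some
+; destination lanes undef can still match as long as every defined lane
+; lines up with the corresponding horizontal sum.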
+
+define <4 x float> @test1_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext10 = extractelement <4 x float> %b, i32 2
+ %vecext11 = extractelement <4 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit5, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: test1_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext6 = extractelement <4 x float> %b, i32 0
+ %vecext7 = extractelement <4 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit, float %add8, i32 2
+ %vecext10 = extractelement <4 x float> %b, i32 2
+ %vecext11 = extractelement <4 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: test2_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test3_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <4 x float> %b, i32 0
+ %vecext7 = extractelement <4 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 2
+ ret <4 x float> %vecinit9
+}
+; CHECK-LABEL: test3_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ ret <4 x float> %vecinit
+}
+; CHECK-LABEL: test4_undef
+; CHECK-NOT: haddps
+; CHECK: ret
+
+
+define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
+ %vecext = extractelement <2 x double> %a, i32 0
+ %vecext1 = extractelement <2 x double> %a, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ ret <2 x double> %vecinit
+}
+; CHECK-LABEL: test5_undef
+; CHECK-NOT: haddpd
+; CHECK: ret
+
+
+define <4 x float> @test6_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test6_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test7_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %b, i32 0
+ %vecext1 = extractelement <4 x float> %b, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 2
+ %vecext2 = extractelement <4 x float> %b, i32 2
+ %vecext3 = extractelement <4 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 3
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test7_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 2
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test8_undef
+; CHECK-NOT: haddps
+; CHECK: ret
+
+
+define <4 x float> @test9_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %b, i32 2
+ %vecext3 = extractelement <4 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 3
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test9_undef
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+define <8 x float> @test10_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %b, i32 2
+ %vecext3 = extractelement <8 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 3
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test10_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %b, i32 4
+ %vecext3 = extractelement <8 x float> %b, i32 5
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 6
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test11_undef
+; SSE-NOT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK: ret
+
+define <8 x float> @test12_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 1
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test12_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x float> @test13_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add1 = fadd float %vecext, %vecext1
+ %vecinit1 = insertelement <8 x float> undef, float %add1, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add2 = fadd float %vecext2, %vecext3
+ %vecinit2 = insertelement <8 x float> %vecinit1, float %add2, i32 1
+ %vecext4 = extractelement <8 x float> %a, i32 4
+ %vecext5 = extractelement <8 x float> %a, i32 5
+ %add3 = fadd float %vecext4, %vecext5
+ %vecinit3 = insertelement <8 x float> %vecinit2, float %add3, i32 2
+ %vecext6 = extractelement <8 x float> %a, i32 6
+ %vecext7 = extractelement <8 x float> %a, i32 7
+ %add4 = fadd float %vecext6, %vecext7
+ %vecinit4 = insertelement <8 x float> %vecinit3, float %add4, i32 3
+ ret <8 x float> %vecinit4
+}
+; CHECK-LABEL: test13_undef
+; SSE: haddps
+; SSE-NOT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %b, i32 2
+ %vecext3 = extractelement <8 x i32> %b, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 3
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test14_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: phaddd
+; CHECK: ret
+
+; On AVX2, the following sequence can be folded into a single horizontal add.
+; If the Subtarget doesn't support AVX2, we avoid emitting two packed integer
+; horizontal adds and instead emit two scalar adds followed by vector inserts.
+define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %b, i32 4
+ %vecext3 = extractelement <8 x i32> %b, i32 5
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 6
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test15_undef
+; SSE-NOT: phaddd
+; AVX-NOT: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test16_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add1 = add i32 %vecext, %vecext1
+ %vecinit1 = insertelement <8 x i32> undef, i32 %add1, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add2 = add i32 %vecext2, %vecext3
+ %vecinit2 = insertelement <8 x i32> %vecinit1, i32 %add2, i32 1
+ %vecext4 = extractelement <8 x i32> %a, i32 4
+ %vecext5 = extractelement <8 x i32> %a, i32 5
+ %add3 = add i32 %vecext4, %vecext5
+ %vecinit3 = insertelement <8 x i32> %vecinit2, i32 %add3, i32 2
+ %vecext6 = extractelement <8 x i32> %a, i32 6
+ %vecext7 = extractelement <8 x i32> %a, i32 7
+ %add4 = add i32 %vecext6, %vecext7
+ %vecinit4 = insertelement <8 x i32> %vecinit3, i32 %add4, i32 3
+ ret <8 x i32> %vecinit4
+}
+; CHECK-LABEL: test17_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: haddps
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/i8-umulo.ll b/test/CodeGen/X86/i8-umulo.ll
new file mode 100644
index 0000000..ba846f3
--- /dev/null
+++ b/test/CodeGen/X86/i8-umulo.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mcpu=generic -march=x86 < %s | FileCheck %s
+; PR19858
+
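+; A sketch of the expected lowering (inferred from the CHECK lines below):
+; the i8 multiply is widened to a 16-bit multiply (imulw), and overflow is
+; detected by testing the high byte of the 16-bit product.
+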
+declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
+define i8 @testumulo(i32 %argc) {
+; CHECK: imulw
+; CHECK: testb %{{.+}}, %{{.+}}
+; CHECK: je [[NOOVERFLOWLABEL:.+]]
+; CHECK: {{.*}}[[NOOVERFLOWLABEL]]:
+; CHECK-NEXT: movb
+; CHECK-NEXT: retl
+top:
+ %RHS = trunc i32 %argc to i8
+ %umul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 25, i8 %RHS)
+ %ex = extractvalue { i8, i1 } %umul, 1
+ br i1 %ex, label %overflow, label %nooverflow
+
+overflow:
+ ret i8 %RHS
+
+nooverflow:
+ %umul.value = extractvalue { i8, i1 } %umul, 0
+ ret i8 %umul.value
+}
diff --git a/test/CodeGen/X86/jump_table_alias.ll b/test/CodeGen/X86/jump_table_alias.ll
new file mode 100644
index 0000000..f3691fd
--- /dev/null
+++ b/test/CodeGen/X86/jump_table_alias.ll
@@ -0,0 +1,33 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck %s
+target triple = "x86_64-unknown-linux-gnu"
+define i32 @f() unnamed_addr jumptable {
+entry:
+ ret i32 0
+}
+
+@i = alias internal i32 ()* @f
+@j = alias i32 ()* @f
+
+define i32 @main(i32 %argc, i8** %argv) {
+ %temp = alloca i32 ()*, align 8
+ store i32 ()* @i, i32()** %temp, align 8
+; CHECK: movq $__llvm_jump_instr_table_0_1
+ %1 = load i32 ()** %temp, align 8
+; CHECK: movl $__llvm_jump_instr_table_0_1
+ %2 = call i32 ()* %1()
+ %3 = call i32 ()* @i()
+; CHECK: callq i
+ %4 = call i32 ()* @j()
+; CHECK: callq j
+ ret i32 %3
+}
+
+; There should only be one table, even though there are two GlobalAliases,
+; because they both alias the same value.
+
+; CHECK: .globl __llvm_jump_instr_table_0_1
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_1,@function
+; CHECK: __llvm_jump_instr_table_0_1:
+; CHECK: jmp f@PLT
+
diff --git a/test/CodeGen/X86/jump_table_bitcast.ll b/test/CodeGen/X86/jump_table_bitcast.ll
new file mode 100644
index 0000000..33a798f
--- /dev/null
+++ b/test/CodeGen/X86/jump_table_bitcast.ll
@@ -0,0 +1,46 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck %s
+target triple = "x86_64-unknown-linux-gnu"
+define i32 @f() unnamed_addr jumptable {
+ ret i32 0
+}
+
+define i32 @g(i8* %a) unnamed_addr jumptable {
+ ret i32 0
+}
+
+define void @h(void ()* %func) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @main() {
+ %g = alloca i32 (...)*, align 8
+ store i32 (...)* bitcast (i32 ()* @f to i32 (...)*), i32 (...)** %g, align 8
+; CHECK: movq $__llvm_jump_instr_table_0_[[ENTRY:1|2|3]], (%rsp)
+; CHECK: movl $__llvm_jump_instr_table_0_[[ENTRY]], %ecx
+ %1 = load i32 (...)** %g, align 8
+ %call = call i32 (...)* %1()
+ call void (void ()*)* @h(void ()* bitcast (void (void ()*)* @h to void ()*))
+; CHECK: movl $__llvm_jump_instr_table_0_{{1|2|3}}, %edi
+; CHECK: callq h
+
+ %a = call i32 (i32*)* bitcast (i32 (i8*)* @g to i32(i32*)*)(i32* null)
+; CHECK: callq g
+ ret i32 %a
+}
+
+; CHECK: .globl __llvm_jump_instr_table_0_1
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_1,@function
+; CHECK: __llvm_jump_instr_table_0_1:
+; CHECK: jmp {{f|g|h}}@PLT
+; CHECK: .globl __llvm_jump_instr_table_0_2
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_2,@function
+; CHECK: __llvm_jump_instr_table_0_2:
+; CHECK: jmp {{f|g|h}}@PLT
+; CHECK: .globl __llvm_jump_instr_table_0_3
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_3,@function
+; CHECK: __llvm_jump_instr_table_0_3:
+; CHECK: jmp {{f|g|h}}@PLT
+
diff --git a/test/CodeGen/X86/jump_tables.ll b/test/CodeGen/X86/jump_tables.ll
new file mode 100644
index 0000000..5a0aed0
--- /dev/null
+++ b/test/CodeGen/X86/jump_tables.ll
@@ -0,0 +1,272 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck --check-prefix=SINGLE %s
+; RUN: llc <%s -jump-table-type=arity | FileCheck --check-prefix=ARITY %s
+; RUN: llc <%s -jump-table-type=simplified | FileCheck --check-prefix=SIMPL %s
+; RUN: llc <%s -jump-table-type=full | FileCheck --check-prefix=FULL %s
+
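+; The four table layouts exercised here, as described by -jump-table-type:
+;   single     - one table for all jumptable functions
+;   arity      - one table per number of parameters
+;   simplified - one table per simplified function type
+;   full       - one table per unique function type
+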
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.fun_struct = type { i32 (...)* }
+
+define void @indirect_fun() unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_match() unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @indirect_fun_i32() unnamed_addr jumptable {
+ ret i32 0
+}
+
+define i32 @indirect_fun_i32_1(i32 %a) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define i32 @indirect_fun_i32_2(i32 %a, i32 %b) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define i32* @indirect_fun_i32S_2(i32* %a, i32 %b) unnamed_addr jumptable {
+ ret i32* %a
+}
+
+define void @indirect_fun_struct(%struct.fun_struct %fs) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_fun(i32 (...)* %fun, i32 %a) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @indirect_fun_fun_ret(i32 (...)* %fun, i32 %a) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define void @indirect_fun_array([19 x i8] %a) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_vec(<3 x i32> %a) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_vec_2(<4 x float> %a) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @m(void ()* %fun) {
+ call void ()* %fun()
+ ret i32 0
+}
+
+define void ()* @get_fun() {
+ ret void ()* @indirect_fun
+; SINGLE: movl $__llvm_jump_instr_table_0_
+; ARITY: movl $__llvm_jump_instr_table_
+; SIMPL: movl $__llvm_jump_instr_table_
+; FULL: movl $__llvm_jump_instr_table_
+}
+
+define i32 @main(i32 %argc, i8** %argv) {
+ %f = call void ()* ()* @get_fun()
+ %a = call i32 @m(void ()* %f)
+ ret i32 %a
+}
+
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_1
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_1,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_1:
+; SINGLE-DAG: jmp indirect_fun_array@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_2,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_2:
+; SINGLE-DAG: jmp indirect_fun_i32_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_3
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_3,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_3:
+; SINGLE-DAG: jmp indirect_fun_vec_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_4
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_4,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_4:
+; SINGLE-DAG: jmp indirect_fun_i32S_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_5
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_5,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_5:
+; SINGLE-DAG: jmp indirect_fun_struct@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_6
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_6,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_6:
+; SINGLE-DAG: jmp indirect_fun_i32_1@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_7
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_7,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_7:
+; SINGLE-DAG: jmp indirect_fun_i32@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_8
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_8,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_8:
+; SINGLE-DAG: jmp indirect_fun_fun@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_9
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_9,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_9:
+; SINGLE-DAG: jmp indirect_fun_fun_ret@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_10
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_10,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_10:
+; SINGLE-DAG: jmp indirect_fun@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_11
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_11,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_11:
+; SINGLE-DAG: jmp indirect_fun_match@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_12
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_12,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_12:
+; SINGLE-DAG: jmp indirect_fun_vec@PLT
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+
+
+; ARITY-DAG: .globl __llvm_jump_instr_table_2_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_2_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_2_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: ud2
+; ARITY-DAG: .globl __llvm_jump_instr_table_0_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_0_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_0_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+; ARITY-DAG: .globl __llvm_jump_instr_table_1_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_1_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_1_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+
+; SIMPL-DAG: .globl __llvm_jump_instr_table_2_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_2_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_2_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: ud2
+; SIMPL-DAG: .globl __llvm_jump_instr_table_0_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_0_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_0_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_1_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_1_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_1_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_3_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_3_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_3_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_4_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_4_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_4_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+
+
+; FULL-DAG: .globl __llvm_jump_instr_table_10_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_10_1,@function
+; FULL-DAG:__llvm_jump_instr_table_10_1:
+; FULL-DAG: jmp indirect_fun_i32_1@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_9_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_9_1,@function
+; FULL-DAG:__llvm_jump_instr_table_9_1:
+; FULL-DAG: jmp indirect_fun_i32_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_7_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_7_1,@function
+; FULL-DAG:__llvm_jump_instr_table_7_1:
+; FULL-DAG: jmp indirect_fun_i32S_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_3_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_3_1,@function
+; FULL-DAG:__llvm_jump_instr_table_3_1:
+; FULL-DAG: jmp indirect_fun_vec_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_2_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_2_1,@function
+; FULL-DAG:__llvm_jump_instr_table_2_1:
+; FULL-DAG: jmp indirect_fun@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_8_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_8_1,@function
+; FULL-DAG:__llvm_jump_instr_table_8_1:
+; FULL-DAG: jmp indirect_fun_i32@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_1_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_1_1,@function
+; FULL-DAG:__llvm_jump_instr_table_1_1:
+; FULL-DAG: jmp indirect_fun_array@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_0_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_0_1,@function
+; FULL-DAG:__llvm_jump_instr_table_0_1:
+; FULL-DAG: jmp indirect_fun_vec@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_6_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_6_1,@function
+; FULL-DAG:__llvm_jump_instr_table_6_1:
+; FULL-DAG: jmp indirect_fun_struct@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_5_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_5_1,@function
+; FULL-DAG:__llvm_jump_instr_table_5_1:
+; FULL-DAG: jmp indirect_fun_fun@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_4_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_4_1,@function
+; FULL-DAG:__llvm_jump_instr_table_4_1:
+; FULL-DAG: jmp indirect_fun_fun_ret@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
diff --git a/test/CodeGen/X86/libcall-sret.ll b/test/CodeGen/X86/libcall-sret.ll
new file mode 100644
index 0000000..67b99ac
--- /dev/null
+++ b/test/CodeGen/X86/libcall-sret.ll
@@ -0,0 +1,28 @@
+; RUN: llc -mtriple=i686-linux-gnu -o - %s | FileCheck %s
+
+@var = global i128 0
+
+; We were trying to convert the i128 operation into a libcall, but failing to
+; perform sret demotion when we couldn't return the result in registers. Make
+; sure we marshal the return properly:
+
+define void @test_sret_libcall(i128 %l, i128 %r) {
+; CHECK-LABEL: test_sret_libcall:
+
+  ; Stack for call: 4(sret ptr), 16(i128 %l), 16(i128 %r). So the next logical
+ ; (aligned) place for the actual sret data is %esp + 40.
+; CHECK: leal 40(%esp), [[SRET_ADDR:%[a-z]+]]
+; CHECK: movl [[SRET_ADDR]], (%esp)
+; CHECK: calll __multi3
+; CHECK-DAG: movl 40(%esp), [[RES0:%[a-z]+]]
+; CHECK-DAG: movl 44(%esp), [[RES1:%[a-z]+]]
+; CHECK-DAG: movl 48(%esp), [[RES2:%[a-z]+]]
+; CHECK-DAG: movl 52(%esp), [[RES3:%[a-z]+]]
+; CHECK-DAG: movl [[RES0]], var
+; CHECK-DAG: movl [[RES1]], var+4
+; CHECK-DAG: movl [[RES2]], var+8
+; CHECK-DAG: movl [[RES3]], var+12
+ %prod = mul i128 %l, %r
+ store i128 %prod, i128* @var
+ ret void
+}
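+
+; For context (an assumption about the runtime, not checked here): compiler-rt
+; declares the callee as ti_int __multi3(ti_int a, ti_int b), where ti_int is
+; a 128-bit integer. On i686 a 128-bit result cannot be returned in registers,
+; so the call is lowered with a hidden sret pointer as the first argument.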
diff --git a/test/CodeGen/X86/lit.local.cfg b/test/CodeGen/X86/lit.local.cfg
index 3d91b03..8ed58f1 100644
--- a/test/CodeGen/X86/lit.local.cfg
+++ b/test/CodeGen/X86/lit.local.cfg
@@ -6,7 +6,6 @@
# cleanly.
config.suffixes = ['.ll', '.test', '.txt']
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/X86/lower-bitcast.ll b/test/CodeGen/X86/lower-bitcast.ll
index b9b29a5..f47161e 100644
--- a/test/CodeGen/X86/lower-bitcast.ll
+++ b/test/CodeGen/X86/lower-bitcast.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
define double @test1(double %A) {
@@ -9,14 +10,19 @@ define double @test1(double %A) {
}
; FIXME: Ideally we should be able to fold the entire body of @test1 into a
; single paddd instruction. At the moment we produce the sequence
-; pshufd+paddq+pshufd.
-
+; pshufd+paddd+pshufd. This is fixed with the widening legalization.
+;
; CHECK-LABEL: test1
; CHECK-NOT: movsd
; CHECK: pshufd
-; CHECK-NEXT: paddq
+; CHECK-NEXT: paddd
; CHECK-NEXT: pshufd
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test1
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE: paddd
+; CHECK-WIDE-NEXT: ret
define double @test2(double %A, double %B) {
@@ -26,17 +32,15 @@ define double @test2(double %A, double %B) {
%3 = bitcast <2 x i32> %add to double
ret double %3
}
-; FIXME: Ideally we should be able to fold the entire body of @test2 into a
-; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
-; sequence pshufd+pshufd+paddq+pshufd.
-
; CHECK-LABEL: test2
; CHECK-NOT: movsd
-; CHECK: pshufd
-; CHECK-NEXT: pshufd
-; CHECK-NEXT: paddq
-; CHECK-NEXT: pshufd
+; CHECK: paddd
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test2
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE: paddd
+; CHECK-WIDE-NEXT: ret
define i64 @test3(i64 %A) {
@@ -50,6 +54,12 @@ define i64 @test3(i64 %A) {
; CHECK: addps
; CHECK-NOT: pshufd
; CHECK: ret
+;
+; CHECK-WIDE-LABEL: test3
+; CHECK-WIDE-NOT: pshufd
+; CHECK-WIDE: addps
+; CHECK-WIDE-NOT: pshufd
+; CHECK-WIDE: ret
define i64 @test4(i64 %A) {
@@ -59,13 +69,20 @@ define i64 @test4(i64 %A) {
ret i64 %2
}
; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
-; Ideally, we should fold that sequence into a single paddd.
-
+; Ideally, we should fold that sequence into a single paddd. This is fixed with
+; the widening legalization.
+;
; CHECK-LABEL: test4
; CHECK: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK: ret
+;
+; CHECK-WIDE-LABEL: test4
+; CHECK-WIDE: movd %{{rdi|rcx}},
+; CHECK-WIDE-NEXT: paddd
+; CHECK-WIDE-NEXT: movd {{.*}}, %rax
+; CHECK-WIDE: ret
define double @test5(double %A) {
@@ -77,6 +94,10 @@ define double @test5(double %A) {
; CHECK-LABEL: test5
; CHECK: addps
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test5
+; CHECK-WIDE: addps
+; CHECK-WIDE-NEXT: ret
define double @test6(double %A) {
@@ -86,14 +107,20 @@ define double @test6(double %A) {
ret double %2
}
; FIXME: Ideally we should be able to fold the entire body of @test6 into a
-; single paddw instruction.
-
+; single paddw instruction. This is fixed with the widening legalization.
+;
; CHECK-LABEL: test6
; CHECK-NOT: movsd
; CHECK: punpcklwd
-; CHECK-NEXT: paddd
+; CHECK-NEXT: paddw
; CHECK-NEXT: pshufb
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test6
+; CHECK-WIDE-NOT: mov
+; CHECK-WIDE-NOT: punpcklwd
+; CHECK-WIDE: paddw
+; CHECK-WIDE-NEXT: ret
define double @test7(double %A, double %B) {
@@ -103,17 +130,17 @@ define double @test7(double %A, double %B) {
%3 = bitcast <4 x i16> %add to double
ret double %3
}
-; FIXME: Ideally we should be able to fold the entire body of @test7 into a
-; single 'paddw %xmm1, %xmm0' instruction. At the moment we produce the
-; sequence pshufd+pshufd+paddd+pshufd.
-
; CHECK-LABEL: test7
; CHECK-NOT: movsd
-; CHECK: punpcklwd
-; CHECK-NEXT: punpcklwd
-; CHECK-NEXT: paddd
-; CHECK-NEXT: pshufb
+; CHECK-NOT: punpcklwd
+; CHECK: paddw
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test7
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklwd
+; CHECK-WIDE: paddw
+; CHECK-WIDE-NEXT: ret
define double @test8(double %A) {
@@ -124,14 +151,20 @@ define double @test8(double %A) {
}
; FIXME: Ideally we should be able to fold the entire body of @test8 into a
; single paddb instruction. At the moment we produce the sequence
-; pshufd+paddw+pshufd.
-
+; punpcklbw+paddb+pshufb. This is fixed with the widening legalization.
+;
; CHECK-LABEL: test8
; CHECK-NOT: movsd
; CHECK: punpcklbw
-; CHECK-NEXT: paddw
+; CHECK-NEXT: paddb
; CHECK-NEXT: pshufb
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test8
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklbw
+; CHECK-WIDE: paddb
+; CHECK-WIDE-NEXT: ret
define double @test9(double %A, double %B) {
@@ -141,15 +174,15 @@ define double @test9(double %A, double %B) {
%3 = bitcast <8 x i8> %add to double
ret double %3
}
-; FIXME: Ideally we should be able to fold the entire body of @test9 into a
-; single 'paddb %xmm1, %xmm0' instruction. At the moment we produce the
-; sequence pshufd+pshufd+paddw+pshufd.
-
; CHECK-LABEL: test9
; CHECK-NOT: movsd
-; CHECK: punpcklbw
-; CHECK-NEXT: punpcklbw
-; CHECK-NEXT: paddw
-; CHECK-NEXT: pshufb
+; CHECK-NOT: punpcklbw
+; CHECK: paddb
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test9
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklbw
+; CHECK-WIDE: paddb
+; CHECK-WIDE-NEXT: ret
diff --git a/test/CodeGen/X86/macho-comdat.ll b/test/CodeGen/X86/macho-comdat.ll
new file mode 100644
index 0000000..3c2d997
--- /dev/null
+++ b/test/CodeGen/X86/macho-comdat.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -mtriple x86_64-apple-darwin < %s 2> %t
+; RUN: FileCheck < %t %s
+
+$f = comdat any
+@v = global i32 0, comdat $f
+; CHECK: LLVM ERROR: MachO doesn't support COMDATs, 'f' cannot be lowered.
diff --git a/test/CodeGen/X86/null-streamer.ll b/test/CodeGen/X86/null-streamer.ll
index 7c0e82f..fa77fcb 100644
--- a/test/CodeGen/X86/null-streamer.ll
+++ b/test/CodeGen/X86/null-streamer.ll
@@ -1,6 +1,7 @@
; Check the MCNullStreamer operates correctly, at least on a minimal test case.
;
; RUN: llc -filetype=null -o %t -march=x86 %s
+; RUN: llc -filetype=null -o %t -mtriple=i686-cygwin %s
define void @f0() {
ret void
@@ -9,3 +10,20 @@ define void @f0() {
define void @f1() {
ret void
}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!11, !13}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !" ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !9, metadata !2, metadata !""}
+!1 = metadata !{metadata !"", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"", metadata !"", metadata !"", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* null, null, null, metadata !2, i32 2}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null}
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786484, i32 0, null, metadata !"i", metadata !"i", metadata !"_ZL1i", metadata !5, i32 1, metadata !8, i32 1, i32 1, null, null}
+!11 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
+!13 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/X86/pr20020.ll b/test/CodeGen/X86/pr20020.ll
new file mode 100644
index 0000000..83dae36
--- /dev/null
+++ b/test/CodeGen/X86/pr20020.ll
@@ -0,0 +1,73 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx -disable-lsr -post-RA-scheduler=1 -break-anti-dependencies=critical | FileCheck %s
+
+; In PR20020, the critical anti-dependency breaker algorithm mistakenly
+; changes the register operands of an 'xorl %eax, %eax' to 'xorl %ecx, %ecx'
+; and then immediately reloads %rcx with a value based on the wrong %rax.
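+;
+; Schematically (a sketch of the miscompile, not literal output):
+;   good:  xorl %eax, %eax            bad:  xorl %ecx, %ecx
+;          leaq 1(%rax), %rcx               leaq 1(%rax), %rcx  ; %rax is stale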
+
+; CHECK-NOT: xorl %ecx, %ecx
+; CHECK: leaq 1(%rax), %rcx
+
+
+%struct.planet = type { double, double, double }
+
+; Function Attrs: nounwind ssp uwtable
+define void @advance(i32 %nbodies, %struct.planet* nocapture %bodies) #0 {
+entry:
+ %cmp4 = icmp sgt i32 %nbodies, 0
+ br i1 %cmp4, label %for.body.preheader, label %for.end38
+
+for.body.preheader: ; preds = %entry
+ %gep = getelementptr %struct.planet* %bodies, i64 1, i32 1
+ %gep13 = bitcast double* %gep to %struct.planet*
+ %0 = add i32 %nbodies, -1
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.inc20
+ %iv19 = phi i32 [ %0, %for.body.preheader ], [ %iv.next, %for.inc20 ]
+ %iv = phi %struct.planet* [ %gep13, %for.body.preheader ], [ %gep14, %for.inc20 ]
+ %iv9 = phi i64 [ %iv.next10, %for.inc20 ], [ 0, %for.body.preheader ]
+ %iv.next10 = add nuw nsw i64 %iv9, 1
+ %1 = trunc i64 %iv.next10 to i32
+ %cmp22 = icmp slt i32 %1, %nbodies
+ br i1 %cmp22, label %for.body3.lr.ph, label %for.inc20
+
+for.body3.lr.ph: ; preds = %for.body
+ %x = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 0
+ %y = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 1
+ %vx = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 2
+ br label %for.body3
+
+for.body3: ; preds = %for.body3, %for.body3.lr.ph
+ %iv20 = phi i32 [ %iv.next21, %for.body3 ], [ %iv19, %for.body3.lr.ph ]
+ %iv15 = phi %struct.planet* [ %gep16, %for.body3 ], [ %iv, %for.body3.lr.ph ]
+ %iv1517 = bitcast %struct.planet* %iv15 to double*
+ %2 = load double* %x, align 8
+ %gep18 = getelementptr double* %iv1517, i64 -1
+ %3 = load double* %gep18, align 8
+ %sub = fsub double %2, %3
+ %4 = load double* %y, align 8
+ %5 = load double* %iv1517, align 8
+ %sub8 = fsub double %4, %5
+ %add10 = fadd double %sub, %sub8
+ %call = tail call double @sqrt(double %sub8) #2
+ store double %add10, double* %vx, align 8
+ %gep16 = getelementptr %struct.planet* %iv15, i64 1
+ %iv.next21 = add i32 %iv20, -1
+ %exitcond = icmp eq i32 %iv.next21, 0
+ br i1 %exitcond, label %for.inc20, label %for.body3
+
+for.inc20: ; preds = %for.body3, %for.body
+ %lftr.wideiv11 = trunc i64 %iv.next10 to i32
+ %gep14 = getelementptr %struct.planet* %iv, i64 1
+ %iv.next = add i32 %iv19, -1
+ %exitcond12 = icmp eq i32 %lftr.wideiv11, %nbodies
+ br i1 %exitcond12, label %for.end38, label %for.body
+
+for.end38: ; preds = %for.inc20, %entry
+ ret void
+}
+
+; Function Attrs: nounwind
+declare double @sqrt(double) #1
+
+attributes #0 = { "no-frame-pointer-elim-non-leaf" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/X86/pr20088.ll b/test/CodeGen/X86/pr20088.ll
new file mode 100644
index 0000000..3a82962
--- /dev/null
+++ b/test/CodeGen/X86/pr20088.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s
+
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <16 x i8> @foo(<16 x i8> %x) {
+; CHECK: vpblendvb
+ %res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> zeroinitializer, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %x)
+ ret <16 x i8> %res
+}
diff --git a/test/CodeGen/X86/pr5145.ll b/test/CodeGen/X86/pr5145.ll
index d048db8..32a797b 100644
--- a/test/CodeGen/X86/pr5145.ll
+++ b/test/CodeGen/X86/pr5145.ll
@@ -5,29 +5,29 @@ define void @atomic_maxmin_i8() {
; CHECK: atomic_maxmin_i8
%1 = atomicrmw max i8* @sc8, i8 5 acquire
; CHECK: [[LABEL1:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovl
+; CHECK: movsbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL1]]
%2 = atomicrmw min i8* @sc8, i8 6 acquire
; CHECK: [[LABEL3:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovg
+; CHECK: movsbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL3]]
%3 = atomicrmw umax i8* @sc8, i8 7 acquire
; CHECK: [[LABEL5:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovb
+; CHECK: movzbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL5]]
%4 = atomicrmw umin i8* @sc8, i8 8 acquire
; CHECK: [[LABEL7:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmova
+; CHECK: movzbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL7]]
diff --git a/test/CodeGen/X86/pshufd-combine-crash.ll b/test/CodeGen/X86/pshufd-combine-crash.ll
new file mode 100644
index 0000000..84c69e3
--- /dev/null
+++ b/test/CodeGen/X86/pshufd-combine-crash.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -debug
+
+; REQUIRES: asserts
+
+; Test that the dag combiner doesn't assert if we try to replace a sequence of two
+; v4f32 X86ISD::PSHUFD nodes with a single PSHUFD.
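+;
+; Composing the mask <3, 0, 2, 1> with itself gives <1, 3, 2, 0>, so the two
+; shuffles below are together equivalent to one PSHUFD with the combined mask.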
+
+
+define <4 x float> @test(<4 x float> %V) {
+ %1 = shufflevector <4 x float> %V, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x float> %2
+}
+
diff --git a/test/CodeGen/X86/rdpmc.ll b/test/CodeGen/X86/rdpmc.ll
new file mode 100644
index 0000000..7f1ca46
--- /dev/null
+++ b/test/CodeGen/X86/rdpmc.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=x86-64 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=X86-64
+; RUN: llc < %s -march=x86 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=X86
+
+; Verify that we correctly lower the "Read Performance-Monitoring Counters"
+; x86 builtin.
+
+
+define i64 @test_builtin_read_pmc(i32 %ID) {
+ %1 = tail call i64 @llvm.x86.rdpmc(i32 %ID)
+ ret i64 %1
+}
+; CHECK-LABEL: test_builtin_read_pmc
+; CHECK: rdpmc
+; X86-NOT: shlq
+; X86-NOT: or
+; X86-64: shlq
+; X86-64: or
+; CHECK-NOT: mov
+; CHECK: ret
+
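+; Why the X86/X86-64 split: rdpmc returns the selected counter in EDX:EAX.
+; On x86-64 the i64 result is assembled from the two 32-bit halves (shlq+or),
+; while on 32-bit targets EDX:EAX is already the i64 return pair.
+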
+declare i64 @llvm.x86.rdpmc(i32 %ID)
+
diff --git a/test/CodeGen/X86/shift-parts.ll b/test/CodeGen/X86/shift-parts.ll
index ce4f538..ddad307 100644
--- a/test/CodeGen/X86/shift-parts.ll
+++ b/test/CodeGen/X86/shift-parts.ll
@@ -1,10 +1,12 @@
-; RUN: llc < %s -march=x86-64 | grep shrdq
+; RUN: llc -march=x86-64 < %s | FileCheck %s
; PR4736
%0 = type { i32, i8, [35 x i8] }
@g_144 = external global %0, align 8 ; <%0*> [#uses=1]
+; CHECK: shrdq
+
define i32 @int87(i32 %uint64p_8) nounwind {
entry:
%srcval4 = load i320* bitcast (%0* @g_144 to i320*), align 8 ; <i320> [#uses=1]
diff --git a/test/CodeGen/X86/sqrt.ll b/test/CodeGen/X86/sqrt.ll
new file mode 100644
index 0000000..be7c6e8
--- /dev/null
+++ b/test/CodeGen/X86/sqrt.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=AVX
+
+define float @test_sqrt_f32(float %a) {
+; SSE2-LABEL: test_sqrt_f32
+; SSE2: sqrtss %xmm0, %xmm0
+; AVX-LABEL: test_sqrt_f32
+; AVX: vsqrtss %xmm0, %xmm0
+ %res = call float @llvm.sqrt.f32(float %a)
+ ret float %res
+}
+declare float @llvm.sqrt.f32(float) nounwind readnone
+
+define double @test_sqrt_f64(double %a) {
+; SSE2-LABEL: test_sqrt_f64
+; SSE2: sqrtsd %xmm0, %xmm0
+; AVX-LABEL: test_sqrt_f64
+; AVX: vsqrtsd %xmm0, %xmm0
+ %res = call double @llvm.sqrt.f64(double %a)
+ ret double %res
+}
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86.ll b/test/CodeGen/X86/sse2-intrinsics-x86.ll
index cfc892d..c906ecd 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86.ll
@@ -717,3 +717,30 @@ define void @test_x86_sse2_pause() {
ret void
}
declare void @llvm.x86.sse2.pause() nounwind
+
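+; Note: the immediate 27 (0b00011011) selects elements 3,2,1,0, i.e. a full
+; reversal, so each intrinsic below should lower to a single shuffle with
+; that mask.
+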
+define <4 x i32> @test_x86_sse2_pshuf_d(<4 x i32> %a) {
+; CHECK-LABEL: test_x86_sse2_pshuf_d:
+; CHECK: pshufd $27
+entry:
+ %res = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27) nounwind readnone
+ ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8) nounwind readnone
+
+define <8 x i16> @test_x86_sse2_pshufl_w(<8 x i16> %a) {
+; CHECK-LABEL: test_x86_sse2_pshufl_w:
+; CHECK: pshuflw $27
+entry:
+ %res = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27) nounwind readnone
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8) nounwind readnone
+
+define <8 x i16> @test_x86_sse2_pshufh_w(<8 x i16> %a) {
+; CHECK-LABEL: test_x86_sse2_pshufh_w:
+; CHECK: pshufhw $27
+entry:
+ %res = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27) nounwind readnone
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8) nounwind readnone
diff --git a/test/CodeGen/X86/sse3-avx-addsub-2.ll b/test/CodeGen/X86/sse3-avx-addsub-2.ll
new file mode 100644
index 0000000..b7706cc
--- /dev/null
+++ b/test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -0,0 +1,318 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+
+
+; Verify that we correctly generate 'addsub' instructions from
+; a sequence of vector extracts + float add/sub + vector inserts.
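+;
+; As a reminder, addsubps computes { a0-b0, a1+b1, a2-b2, a3+b3 }: subtract
+; in the even lanes, add in the odd lanes. The tests below build that pattern
+; one scalar at a time and expect it to be matched.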
+
+define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test1
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 2
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test2
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add = fadd float %4, %3
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test3
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 1
+ %4 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 1
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test4
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub2 = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 1
+ %4 = extractelement <4 x float> %B, i32 1
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 1
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test5
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test6(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test6
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
+ %1 = extractelement <4 x double> %A, i32 0
+ %2 = extractelement <4 x double> %B, i32 0
+ %sub = fsub double %1, %2
+ %3 = extractelement <4 x double> %A, i32 2
+ %4 = extractelement <4 x double> %B, i32 2
+ %sub2 = fsub double %3, %4
+ %5 = extractelement <4 x double> %A, i32 1
+ %6 = extractelement <4 x double> %B, i32 1
+ %add = fadd double %5, %6
+ %7 = extractelement <4 x double> %A, i32 3
+ %8 = extractelement <4 x double> %B, i32 3
+ %add2 = fadd double %7, %8
+ %vecinsert1 = insertelement <4 x double> undef, double %add, i32 1
+ %vecinsert2 = insertelement <4 x double> %vecinsert1, double %add2, i32 3
+ %vecinsert3 = insertelement <4 x double> %vecinsert2, double %sub, i32 0
+ %vecinsert4 = insertelement <4 x double> %vecinsert3, double %sub2, i32 2
+ ret <4 x double> %vecinsert4
+}
+; CHECK-LABEL: test7
+; SSE: addsubpd
+; SSE-NEXT: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8(<2 x double> %A, <2 x double> %B) {
+ %1 = extractelement <2 x double> %A, i32 0
+ %2 = extractelement <2 x double> %B, i32 0
+ %sub = fsub double %1, %2
+ %3 = extractelement <2 x double> %A, i32 1
+ %4 = extractelement <2 x double> %B, i32 1
+ %add = fadd double %3, %4
+ %vecinsert1 = insertelement <2 x double> undef, double %sub, i32 0
+ %vecinsert2 = insertelement <2 x double> %vecinsert1, double %add, i32 1
+ ret <2 x double> %vecinsert2
+}
+; CHECK-LABEL: test8
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK: ret
+
+
+define <8 x float> @test9(<8 x float> %A, <8 x float> %B) {
+ %1 = extractelement <8 x float> %A, i32 0
+ %2 = extractelement <8 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <8 x float> %A, i32 2
+ %4 = extractelement <8 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <8 x float> %A, i32 1
+ %6 = extractelement <8 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <8 x float> %A, i32 3
+ %8 = extractelement <8 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %9 = extractelement <8 x float> %A, i32 4
+ %10 = extractelement <8 x float> %B, i32 4
+ %sub3 = fsub float %9, %10
+ %11 = extractelement <8 x float> %A, i32 6
+ %12 = extractelement <8 x float> %B, i32 6
+ %sub4 = fsub float %11, %12
+ %13 = extractelement <8 x float> %A, i32 5
+ %14 = extractelement <8 x float> %B, i32 5
+ %add3 = fadd float %13, %14
+ %15 = extractelement <8 x float> %A, i32 7
+ %16 = extractelement <8 x float> %B, i32 7
+ %add4 = fadd float %15, %16
+ %vecinsert1 = insertelement <8 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <8 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <8 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <8 x float> %vecinsert3, float %sub2, i32 2
+ %vecinsert5 = insertelement <8 x float> %vecinsert4, float %add3, i32 5
+ %vecinsert6 = insertelement <8 x float> %vecinsert5, float %add4, i32 7
+ %vecinsert7 = insertelement <8 x float> %vecinsert6, float %sub3, i32 4
+ %vecinsert8 = insertelement <8 x float> %vecinsert7, float %sub4, i32 6
+ ret <8 x float> %vecinsert8
+}
+; CHECK-LABEL: test9
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+; Verify that we don't generate addsub instructions for the following
+; functions.
+define <4 x float> @test10(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test11(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub = fsub float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test11
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test12(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 1
+ %2 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test12
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test13(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 3
+ %2 = extractelement <4 x float> %B, i32 3
+ %add = fadd float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 3
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test14(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %sub2, i32 2
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test15(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 1
+ %2 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test15
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, undef
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, undef
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
diff --git a/test/CodeGen/X86/sse3-avx-addsub.ll b/test/CodeGen/X86/sse3-avx-addsub.ll
new file mode 100644
index 0000000..8b66743
--- /dev/null
+++ b/test/CodeGen/X86/sse3-avx-addsub.ll
@@ -0,0 +1,296 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s -check-prefix=SSE -check-prefix=CHECK
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=AVX -check-prefix=CHECK
+
+; Test ADDSUB ISel patterns.
+
+; Functions below are obtained from the following source:
+;
+; typedef double double2 __attribute__((ext_vector_type(2)));
+; typedef double double4 __attribute__((ext_vector_type(4)));
+; typedef float float4 __attribute__((ext_vector_type(4)));
+; typedef float float8 __attribute__((ext_vector_type(8)));
+;
+; float4 test1(float4 A, float4 B) {
+; float4 X = A - B;
+; float4 Y = A + B;
+; return (float4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; float8 test2(float8 A, float8 B) {
+; float8 X = A - B;
+; float8 Y = A + B;
+; return (float8){X[0], Y[1], X[2], Y[3], X[4], Y[5], X[6], Y[7]};
+; }
+;
+; double4 test3(double4 A, double4 B) {
+; double4 X = A - B;
+; double4 Y = A + B;
+; return (double4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; double2 test4(double2 A, double2 B) {
+; double2 X = A - B;
+; double2 Y = A + B;
+; return (double2){X[0], Y[1]};
+; }
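+;
+; For reference, addsubps/addsubpd subtract in the even lanes and add in the
+; odd lanes, i.e. for <4 x float>:
+;   result = { A[0]-B[0], A[1]+B[1], A[2]-B[2], A[3]+B[3] }
+; which is exactly the blend of the fsub result (even lanes) with the fadd
+; result (odd lanes) that the shuffle masks below select.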
+
+define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %A, %B
+ %vecinit6 = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test1
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <8 x float> @test2(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %A, %B
+ %vecinit14 = shufflevector <8 x float> %sub, <8 x float> %add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test2
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %A, %B
+ %vecinit6 = shufflevector <4 x double> %sub, <4 x double> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test3
+; SSE: addsubpd
+; SSE: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test4(<2 x double> %A, <2 x double> %B) {
+ %add = fadd <2 x double> %A, %B
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %sub, <2 x double> %add, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test4
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test1b(<4 x float> %A, <4 x float>* %B) {
+ %1 = load <4 x float>* %B
+ %add = fadd <4 x float> %A, %1
+ %sub = fsub <4 x float> %A, %1
+ %vecinit6 = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test1b
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <8 x float> @test2b(<8 x float> %A, <8 x float>* %B) {
+ %1 = load <8 x float>* %B
+ %add = fadd <8 x float> %A, %1
+ %sub = fsub <8 x float> %A, %1
+ %vecinit14 = shufflevector <8 x float> %sub, <8 x float> %add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test2b
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test3b(<4 x double> %A, <4 x double>* %B) {
+ %1 = load <4 x double>* %B
+ %add = fadd <4 x double> %A, %1
+ %sub = fsub <4 x double> %A, %1
+ %vecinit6 = shufflevector <4 x double> %sub, <4 x double> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test3b
+; SSE: addsubpd
+; SSE: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test4b(<2 x double> %A, <2 x double>* %B) {
+ %1 = load <2 x double>* %B
+ %sub = fsub <2 x double> %A, %1
+ %add = fadd <2 x double> %A, %1
+ %vecinit2 = shufflevector <2 x double> %sub, <2 x double> %add, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test4b
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK-NEXT: ret
+
+; Functions below are obtained from the following source:
+;
+; float4 test5(float4 A, float4 B) {
+; float4 X = A + B;
+; float4 Y = A - B;
+; return (float4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; float8 test6(float8 A, float8 B) {
+; float8 X = A + B;
+; float8 Y = A - B;
+; return (float8){X[0], Y[1], X[2], Y[3], X[4], Y[5], X[6], Y[7]};
+; }
+;
+; double4 test7(double4 A, double4 B) {
+; double4 X = A + B;
+; double4 Y = A - B;
+; return (double4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; double2 test8(double2 A, double2 B) {
+; double2 X = A + B;
+; double2 Y = A - B;
+; return (double2){X[0], Y[1]};
+; }
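+;
+; These variants want the opposite interleaving (add in the even lanes,
+; subtract in the odd lanes). addsub only provides sub-even/add-odd, but
+; negating B first produces the same values, since A - (-B) = A + B and
+; A + (-B) = A - B; hence the xorps/xorpd (sign-bit flip of B) expected
+; immediately before each addsub below.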
+
+define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %A, %B
+ %vecinit6 = shufflevector <4 x float> %add, <4 x float> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test5
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; CHECK: ret
+
+
+define <8 x float> @test6(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %A, %B
+ %vecinit14 = shufflevector <8 x float> %add, <8 x float> %sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test6
+; SSE: xorps
+; SSE-NEXT: addsubps
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; AVX-NOT: vxorps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %A, %B
+ %vecinit6 = shufflevector <4 x double> %add, <4 x double> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test7
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; AVX-NOT: vxorpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8(<2 x double> %A, <2 x double> %B) {
+ %add = fadd <2 x double> %A, %B
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %add, <2 x double> %sub, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test8
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; CHECK: ret
+
+
+define <4 x float> @test5b(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %B, %A
+ %vecinit6 = shufflevector <4 x float> %add, <4 x float> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test5b
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; CHECK: ret
+
+
+define <8 x float> @test6b(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %B, %A
+ %vecinit14 = shufflevector <8 x float> %add, <8 x float> %sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test6b
+; SSE: xorps
+; SSE-NEXT: addsubps
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; AVX-NOT: vxorps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test7b(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %B, %A
+ %vecinit6 = shufflevector <4 x double> %add, <4 x double> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test7b
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; AVX-NOT: vxorpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8b(<2 x double> %A, <2 x double> %B) {
+ %add = fadd <2 x double> %B, %A
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %add, <2 x double> %sub, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test8b
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/sse41-blend.ll b/test/CodeGen/X86/sse41-blend.ll
index 8ad7987..3a48121 100644
--- a/test/CodeGen/X86/sse41-blend.ll
+++ b/test/CodeGen/X86/sse41-blend.ll
@@ -117,6 +117,24 @@ define <16 x i8> @constant_pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd) {
%1 = select <16 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x i8> %xyzw, <16 x i8> %abcd
ret <16 x i8> %1
}
+
declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
+
+;; Two tests for shufflevectors that are optimized to a blend with an
+;; immediate mask.
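+; The blend immediate has one bit per destination lane; a set bit selects that
+; lane from the second source. Here lanes 1 and 2 of the <4 x float> result
+; come from %b, giving 0b0110 = 6, and lanes 1, 2 and 7 of the <8 x i16>
+; result come from %b, giving 0b10000110 = 134.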
+; CHECK-LABEL: @blend_shufflevector_4xfloat
+; CHECK: blendps $6, %xmm1, %xmm0
+; CHECK: ret
+define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+ ret <4 x float> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_8xi16
+; CHECK: pblendw $134, %xmm1, %xmm0
+; CHECK: ret
+define <8 x i16> @blend_shufflevector_8xi16(<8 x i16> %a, <8 x i16> %b) {
+ %1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 15>
+ ret <8 x i16> %1
+}
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index a3c6201..6726a3e 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -692,3 +692,25 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
%13 = fadd <4 x float> %11, %12
ret <4 x float> %13
}
+
+define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
+; CHECK-LABEL: insertps_with_undefs:
+; CHECK-NOT: shufps
+; CHECK: insertps $32, %xmm0
+; CHECK: ret
+ %1 = load float* %b, align 4
+ %2 = insertelement <4 x float> undef, float %1, i32 0
+ %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
+ ret <4 x float> %result
+}
+
+; Test for a bug in X86ISelLowering.cpp:getINSERTPS where we were using
+; the destination index to change the load, instead of the source index.
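+; For reference, the insertps immediate encodes the source element in bits
+; [7:6], the destination lane in bits [5:4], and a zero mask in bits [3:0],
+; so $48 = 0b00110000 selects source element 0 and destination lane 3.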
+define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
+; CHECK-LABEL: pr20087:
+; CHECK: insertps $48
+; CHECK: ret
+ %load = load <4 x float> *%ptr
+ %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
+ ret <4 x float> %ret
+}
diff --git a/test/CodeGen/X86/stackmap-fast-isel.ll b/test/CodeGen/X86/stackmap-fast-isel.ll
new file mode 100644
index 0000000..0b7e6db
--- /dev/null
+++ b/test/CodeGen/X86/stackmap-fast-isel.ll
@@ -0,0 +1,165 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim -fast-isel -fast-isel-abort | FileCheck %s
+
+; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 4
+; Num LargeConstants
+; CHECK-NEXT: .long 3
+; Num Callsites
+; CHECK-NEXT: .long 7
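+;
+; Each location record below encodes (type, size, regnum, offset): type 4 is
+; a small constant stored inline in the offset field, type 5 is an index into
+; the Large Constants table, and type 2 is a direct frame address (register +
+; offset).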
+
+; Functions and stack size
+; CHECK-NEXT: .quad _constantargs
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _liveConstant
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _directFrameIdx
+; CHECK-NEXT: .quad 40
+; CHECK-NEXT: .quad _longid
+; CHECK-NEXT: .quad 8
+
+; Large Constants
+; CHECK-NEXT: .quad 2147483648
+; CHECK-NEXT: .quad 4294967295
+; CHECK-NEXT: .quad 4294967296
+
+; Callsites
+; Constant arguments
+;
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .long L{{.*}}-_constantargs
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 12
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 65536
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2000000000
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2147483647
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+; LargeConstant at index 0
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+; LargeConstant at index 1
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 1
+; LargeConstant at index 2
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+
+define void @constantargs() {
+entry:
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 15, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
+ ret void
+}
+
+; Map a constant value.
+;
+; CHECK-LABEL: .long L{{.*}}-_liveConstant
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 33
+
+define void @liveConstant() {
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 5, i32 33)
+ ret void
+}
+
+; Directly map an alloca's address.
+;
+; Callsite 16
+; CHECK-LABEL: .long L{{.*}}-_directFrameIdx
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Direct RBP - ofs
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
+
+define void @directFrameIdx() {
+entry:
+ %metadata1 = alloca i64, i32 3, align 8
+ store i64 11, i64* %metadata1
+ store i64 12, i64* %metadata1
+ store i64 13, i64* %metadata1
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
+ ret void
+}
+
+; Test a 64-bit ID.
+;
+; CHECK: .quad 4294967295
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad 4294967296
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad 9223372036854775807
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad -1
+; CHECK-LABEL: .long L{{.*}}-_longid
+define void @longid() {
+entry:
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4294967295, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4294967296, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 9223372036854775807, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 -1, i32 0)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
diff --git a/test/CodeGen/X86/stackmap-liveness.ll b/test/CodeGen/X86/stackmap-liveness.ll
index 9ce5254..897595d 100644
--- a/test/CodeGen/X86/stackmap-liveness.ll
+++ b/test/CodeGen/X86/stackmap-liveness.ll
@@ -1,6 +1,5 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim -enable-stackmap-liveness| FileCheck -check-prefix=STACK %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim -enable-patchpoint-liveness| FileCheck -check-prefix=PATCH %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim -enable-patchpoint-liveness=false | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim | FileCheck -check-prefix=PATCH %s
;
; Note: Print verbose stackmaps using -debug-only=stackmaps.
@@ -37,36 +36,21 @@ entry:
; Align
; CHECK-NEXT: .align 3
-; StackMap 1 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_stackmap_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 2
-; STACK-NEXT: .short 2
-; LiveOut Entry 1: %RSP (8 bytes)
-; STACK-NEXT: .short 7
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 8
-; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
-; STACK-NEXT: .short 19
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 16
-; Align
-; STACK-NEXT: .align 3
-
; StackMap 1 (patchpoint liveness information enabled)
; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
; PATCH-NEXT: .short 0
; PATCH-NEXT: .short 0
; Padding
; PATCH-NEXT: .short 0
-; Num LiveOut Entries: 0
-; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 1
+; PATCH-NEXT: .short 1
+; LiveOut Entry 1: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
; Align
; PATCH-NEXT: .align 3
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 5)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 12, i8* null, i32 0)
%a2 = call i64 asm sideeffect "", "={r8}"() nounwind
%a3 = call i8 asm sideeffect "", "={ah}"() nounwind
%a4 = call <4 x double> asm sideeffect "", "={ymm0}"() nounwind
@@ -83,52 +67,37 @@ entry:
; Align
; CHECK-NEXT: .align 3
-; StackMap 2 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_stackmap_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 6
-; STACK-NEXT: .short 6
-; LiveOut Entry 1: %RAX (1 bytes) --> %AL or %AH
-; STACK-NEXT: .short 0
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 1
-; LiveOut Entry 2: %RSP (8 bytes)
-; STACK-NEXT: .short 7
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 8
-; LiveOut Entry 3: %R8 (8 bytes)
-; STACK-NEXT: .short 8
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 8
-; LiveOut Entry 4: %YMM0 (32 bytes)
-; STACK-NEXT: .short 17
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 32
-; LiveOut Entry 5: %YMM1 (32 bytes)
-; STACK-NEXT: .short 18
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 32
-; LiveOut Entry 6: %YMM2 (16 bytes) --> %XMM2
-; STACK-NEXT: .short 19
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 16
-; Align
-; STACK-NEXT: .align 3
-
; StackMap 2 (patchpoint liveness information enabled)
; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
; PATCH-NEXT: .short 0
; PATCH-NEXT: .short 0
; Padding
; PATCH-NEXT: .short 0
-; Num LiveOut Entries: 0
+; Num LiveOut Entries: 5
+; PATCH-NEXT: .short 5
+; LiveOut Entry 1: %RAX (1 bytes) --> %AL or %AH
; PATCH-NEXT: .short 0
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 1
+; LiveOut Entry 2: %R8 (8 bytes)
+; PATCH-NEXT: .short 8
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 8
+; LiveOut Entry 3: %YMM0 (32 bytes)
+; PATCH-NEXT: .short 17
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 32
+; LiveOut Entry 4: %YMM1 (32 bytes)
+; PATCH-NEXT: .short 18
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 32
+; LiveOut Entry 5: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
; Align
; PATCH-NEXT: .align 3
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 2, i32 5)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 2, i32 12, i8* null, i32 0)
call void asm sideeffect "", "{r8},{ah},{ymm0},{ymm1}"(i64 %a2, i8 %a3, <4 x double> %a4, <4 x double> %a5) nounwind
; StackMap 3 (no liveness information available)
@@ -142,36 +111,25 @@ entry:
; Align
; CHECK-NEXT: .align 3
-; StackMap 3 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_stackmap_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 2
-; STACK-NEXT: .short 2
-; LiveOut Entry 1: %RSP (8 bytes)
-; STACK-NEXT: .short 7
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 8
-; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
-; STACK-NEXT: .short 19
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 16
-; Align
-; STACK-NEXT: .align 3
-
; StackMap 3 (patchpoint liveness information enabled)
; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
; PATCH-NEXT: .short 0
; PATCH-NEXT: .short 0
; Padding
; PATCH-NEXT: .short 0
-; Num LiveOut Entries: 0
-; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 2
+; PATCH-NEXT: .short 2
+; LiveOut Entry 1: %RSP (8 bytes)
+; PATCH-NEXT: .short 7
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 8
+; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
; Align
; PATCH-NEXT: .align 3
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 5)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 12, i8* null, i32 0)
call void asm sideeffect "", "{xmm2}"(<2 x double> %a1) nounwind
ret void
}
@@ -179,33 +137,6 @@ entry:
define void @mixed_liveness() {
entry:
%a1 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
-; StackMap 4 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_mixed_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 1
-; STACK-NEXT: .short 1
-; LiveOut Entry 1: %YMM2 (16 bytes) --> %XMM2
-; STACK-NEXT: .short 19
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 16
-; Align
-; STACK-NEXT: .align 3
-
-
-; StackMap 5 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_mixed_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 0
-; STACK-NEXT: .short 0
-; Align
-; STACK-NEXT: .align 3
-
; StackMap 4 (patchpoint liveness information enabled)
; PATCH-LABEL: .long L{{.*}}-_mixed_liveness
; PATCH-NEXT: .short 0
diff --git a/test/CodeGen/X86/swizzle-2.ll b/test/CodeGen/X86/swizzle-2.ll
new file mode 100644
index 0000000..4b1f903
--- /dev/null
+++ b/test/CodeGen/X86/swizzle-2.ll
@@ -0,0 +1,515 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Test that we correctly fold a shuffle that performs a swizzle of another
+; shuffle node according to the rule
+;  shuffle (shuffle (x, undef, M0), undef, M1) -> shuffle (x, undef, M2)
+;
+; We only do this if the resulting mask is legal, to avoid introducing an
+; illegal shuffle that would otherwise be expanded into a sub-optimal sequence
+; of instructions during the lowering stage.
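+;
+; The folded mask is the composition M2[i] = M0[M1[i]]. For example, in
+; swizzle_1 both masks are [3,2,0,1]; composing that mask with itself gives
+; [1,0,3,2], which is the single pshufd each test checks for.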
+
+
+define <4 x i32> @swizzle_1(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_1
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_2(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_2
+; Mask: [2,1,3,0]
+; CHECK: pshufd $54
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_3(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_3
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_4(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_4
+; Mask: [3,1,0,2]
+; CHECK: pshufd $-121
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_5(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_5
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_6(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_6
+; Mask: [2,0,1,3]
+; CHECK: pshufd $-46
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_7(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_7
+; Mask: [0,2,3,1]
+; CHECK: pshufd $120
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_8(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_8
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_9(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_9
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_10(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_10
+; Mask: [1,2,0,3]
+; CHECK: pshufd $-55
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_11(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_11
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_12(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_12
+; Mask: [0,3,1,2]
+; CHECK: pshufd $-100
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_13(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_13
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_14(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_14
+; Mask: [3,0,2,1]
+; CHECK: pshufd $99
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_15(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_15
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_16(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_16
+; Mask: [2,1,3,0]
+; CHECK: pshufd $54
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_17(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_17
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_18(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_18
+; Mask: [3,1,0,2]
+; CHECK: pshufd $-121
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_19(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_19
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_20(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_20
+; Mask: [2,0,1,3]
+; CHECK: pshufd $-46
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_21(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_21
+; Mask: [0,2,3,1]
+; CHECK: pshufd $120
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_22(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_22
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_23(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_23
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_24(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_24
+; Mask: [1,2,0,3]
+; CHECK: pshufd $-55
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_25(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_25
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_26(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_26
+; Mask: [0,3,1,2]
+; CHECK: pshufd $-100
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_27(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_27
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_28(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_28
+; Mask: [3,0,2,1]
+; CHECK: pshufd $99
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_29(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_29
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+; Make sure that we combine the shuffles from each function below into a single
+; legal shuffle (either pshuflw or pshufb depending on the masks).
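+; pshuflw/pshufhw can only permute the words within one 64-bit half while
+; passing the other half through unchanged, so composed masks that move words
+; across the halves need either a dword-granularity pshufd (when the words
+; move in pairs, as in swizzle_32) or a constant-mask pshufb.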
+
+define <8 x i16> @swizzle_30(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_30
+; Mask: [1,3,2,0,4,5,6,7]
+; CHECK: pshuflw $45
+; CHECK-NOT: pshufb
+; CHECK-NEXT: ret
+
+
+define <8 x i16> @swizzle_31(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 3, i32 0, i32 2, i32 1, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 0, i32 2, i32 1, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_31
+; Mask: [1,3,2,0,4,5,6,7]
+; CHECK: pshuflw $45
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_32(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_32
+; Mask: [2,3,0,1,4,5,6,7] --> equivalent to pshufd mask [1,0,2,3]
+; CHECK: pshufd $-31
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+define <8 x i16> @swizzle_33(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 5, i32 7, i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 5, i32 7, i32 2, i32 3, i32 1, i32 0>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_33
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_34(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 7, i32 6, i32 5, i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 7, i32 6, i32 5, i32 1, i32 2, i32 0, i32 3>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_34
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_35(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 1, i32 3, i32 0, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_35
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_36(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 7, i32 5, i32 0, i32 1, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 7, i32 5, i32 0, i32 1, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_36
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_37(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 7, i32 4, i32 6, i32 5>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_37
+; Mask: [0,1,2,3,4,7,6,5]
+; CHECK: pshufhw $108
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_38(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 5, i32 6, i32 4, i32 7, i32 0, i32 2, i32 1, i32 3>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 5, i32 6, i32 4, i32 7, i32 0, i32 2, i32 1, i32 3>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_38
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_39(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 5, i32 4, i32 6, i32 7, i32 3, i32 2, i32 1, i32 0>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 5, i32 4, i32 6, i32 7, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_39
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_40(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 6, i32 4, i32 7, i32 5, i32 1, i32 0, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 6, i32 4, i32 7, i32 5, i32 1, i32 0, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_40
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_41(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 6, i32 7, i32 5, i32 4, i32 0, i32 1, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 6, i32 7, i32 5, i32 4, i32 0, i32 1, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_41
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_42(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 7, i32 6, i32 4, i32 5>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 7, i32 6, i32 4, i32 5>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_42
+; Mask: [0,1,2,3,5,4,7,6]
+; CHECK: pshufhw $-79
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
diff --git a/test/CodeGen/X86/swizzle-avx2.ll b/test/CodeGen/X86/swizzle-avx2.ll
new file mode 100644
index 0000000..29dfa6c
--- /dev/null
+++ b/test/CodeGen/X86/swizzle-avx2.ll
@@ -0,0 +1,91 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s
+
+; Test that we correctly fold a shuffle that performs a swizzle of another
+; shuffle node according to the rule
+;  shuffle (shuffle (x, undef, M0), undef, M1) -> shuffle (x, undef, M2)
+;
+; We only do this if the resulting mask is legal, to avoid introducing an
+; illegal shuffle that would otherwise be expanded into a sub-optimal sequence
+; of instructions during the lowering stage.
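+;
+; The folded mask is again M2[i] = M0[M1[i]]. When the composed mask applies
+; the same permutation to both 128-bit halves (e.g. swizzle_2, which folds to
+; [2,3,0,1,6,7,4,5]), a single vpshufd suffices; otherwise a single vpermd
+; with a constant index vector is expected.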
+
+; Check that we produce a single vector permute / shuffle in all cases.
+
+define <8 x i32> @swizzle_1(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_1
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_2(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_2
+; CHECK: vpshufd $78
+; CHECK-NOT: vpermd
+; CHECK-NOT: vpshufd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_3(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_3
+; CHECK: vpshufd $78
+; CHECK-NOT: vpermd
+; CHECK-NOT: vpshufd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_4(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_4
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_5(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_5
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_6(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_6
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_7(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_7
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
diff --git a/test/CodeGen/X86/testb-je-fusion.ll b/test/CodeGen/X86/testb-je-fusion.ll
new file mode 100644
index 0000000..9e946ae
--- /dev/null
+++ b/test/CodeGen/X86/testb-je-fusion.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s
+
+; testb should be scheduled right before je to enable macro-fusion.
+
+; CHECK: testb $2, %{{[abcd]}}h
+; CHECK-NEXT: je
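+;
+; On Intel cores a test/cmp immediately followed by a conditional branch can
+; macro-fuse into a single uop, so the scheduler should avoid placing other
+; instructions between the testb and the je.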
+
+define i32 @check_flag(i32 %flags, ...) nounwind {
+entry:
+ %and = and i32 %flags, 512
+ %tobool = icmp eq i32 %and, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ br label %if.end
+
+if.end:
+ %hasflag = phi i32 [ 1, %if.then ], [ 0, %entry ]
+ ret i32 %hasflag
+}
diff --git a/test/CodeGen/X86/vec_cast2.ll b/test/CodeGen/X86/vec_cast2.ll
index 5f6e7a8..1a6c05d 100644
--- a/test/CodeGen/X86/vec_cast2.ll
+++ b/test/CodeGen/X86/vec_cast2.ll
@@ -1,8 +1,20 @@
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=corei7-avx -mattr=+avx -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
;CHECK-LABEL: foo1_8:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo1_8:
+;CHECK-WIDE: vpmovzxbd %xmm0, %xmm1
+;CHECK-WIDE-NEXT: vpslld $24, %xmm1, %xmm1
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm1, %xmm1
+;CHECK-WIDE-NEXT: vpshufb {{.*}}, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpslld $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+;CHECK-WIDE-NEXT: vcvtdq2ps %ymm0, %ymm0
+;CHECK-WIDE-NEXT: ret
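+;
+; vpmovzxbd zero-extends each byte to 32 bits; the vpslld $24 / vpsrad $24
+; pair then sign-extends it, as required for the sitofp from i8.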
define <8 x float> @foo1_8(<8 x i8> %src) {
%res = sitofp <8 x i8> %src to <8 x float>
ret <8 x float> %res
@@ -11,6 +23,13 @@ define <8 x float> @foo1_8(<8 x i8> %src) {
;CHECK-LABEL: foo1_4:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo1_4:
+;CHECK-WIDE: vpmovzxbd %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpslld $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
+;CHECK-WIDE-NEXT: ret
define <4 x float> @foo1_4(<4 x i8> %src) {
%res = sitofp <4 x i8> %src to <4 x float>
ret <4 x float> %res
@@ -19,6 +38,10 @@ define <4 x float> @foo1_4(<4 x i8> %src) {
;CHECK-LABEL: foo2_8:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo2_8:
+;CHECK-WIDE: vcvtdq2ps %ymm{{.*}}, %ymm{{.*}}
+;CHECK-WIDE: ret
define <8 x float> @foo2_8(<8 x i8> %src) {
%res = uitofp <8 x i8> %src to <8 x float>
ret <8 x float> %res
@@ -27,6 +50,10 @@ define <8 x float> @foo2_8(<8 x i8> %src) {
;CHECK-LABEL: foo2_4:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo2_4:
+;CHECK-WIDE: vcvtdq2ps %xmm{{.*}}, %xmm{{.*}}
+;CHECK-WIDE: ret
define <4 x float> @foo2_4(<4 x i8> %src) {
%res = uitofp <4 x i8> %src to <4 x float>
ret <4 x float> %res
diff --git a/test/CodeGen/X86/vec_splat.ll b/test/CodeGen/X86/vec_splat.ll
index a02e383..28f2a90 100644
--- a/test/CodeGen/X86/vec_splat.ll
+++ b/test/CodeGen/X86/vec_splat.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86 -mcpu=pentium4 -mattr=+sse2 | FileCheck %s -check-prefix=SSE2
; RUN: llc < %s -march=x86 -mcpu=pentium4 -mattr=+sse3 | FileCheck %s -check-prefix=SSE3
+; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s -check-prefix=AVX
define void @test_v4sf(<4 x float>* %P, <4 x float>* %Q, float %X) nounwind {
%tmp = insertelement <4 x float> zeroinitializer, float %X, i32 0 ; <<4 x float>> [#uses=1]
@@ -37,6 +38,23 @@ define void @test_v2sd(<2 x double>* %P, <2 x double>* %Q, double %X) nounwind {
define <4 x float> @load_extract_splat(<4 x float>* nocapture readonly %ptr, i64 %i, i64 %j) nounwind {
%1 = getelementptr inbounds <4 x float>* %ptr, i64 %i
%2 = load <4 x float>* %1, align 16
+ %3 = trunc i64 %j to i32
+ %4 = extractelement <4 x float> %2, i32 %3
+ %5 = insertelement <4 x float> undef, float %4, i32 0
+ %6 = insertelement <4 x float> %5, float %4, i32 1
+ %7 = insertelement <4 x float> %6, float %4, i32 2
+ %8 = insertelement <4 x float> %7, float %4, i32 3
+ ret <4 x float> %8
+
+; AVX-LABEL: load_extract_splat
+; AVX-NOT: rsp
+; AVX: vbroadcastss
+}
+
+; Fold extract of a load into the load's address computation. This avoids spilling to the stack.
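+; With a variable extract index %j, extracting after a full vector load would
+; go through a stack temporary; folding the index into the address instead
+; loads the scalar directly from ptr + %i*16 + %j*4, so the broadcast needs
+; no rsp-relative access.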
+define <4 x float> @load_extract_splat1(<4 x float>* nocapture readonly %ptr, i64 %i, i64 %j) nounwind {
+ %1 = getelementptr inbounds <4 x float>* %ptr, i64 %i
+ %2 = load <4 x float>* %1, align 16
%3 = extractelement <4 x float> %2, i64 %j
%4 = insertelement <4 x float> undef, float %3, i32 0
%5 = insertelement <4 x float> %4, float %3, i32 1
@@ -44,7 +62,7 @@ define <4 x float> @load_extract_splat(<4 x float>* nocapture readonly %ptr, i64
%7 = insertelement <4 x float> %6, float %3, i32 3
ret <4 x float> %7
-; AVX-LABEL: load_extract_splat
+; AVX-LABEL: load_extract_splat1
; AVX-NOT: movs
; AVX: vbroadcastss
}
diff --git a/test/CodeGen/X86/vec_split.ll b/test/CodeGen/X86/vec_split.ll
index f9e7c20..bc2c663 100644
--- a/test/CodeGen/X86/vec_split.ll
+++ b/test/CodeGen/X86/vec_split.ll
@@ -40,3 +40,36 @@ define <32 x i16> @split32(<32 x i16> %a, <32 x i16> %b, <32 x i8> %__mask) {
%2 = select <32 x i1> %1, <32 x i16> %a, <32 x i16> %b
ret <32 x i16> %2
}
+
+; PR19492
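+; Each live i128 addition is expected to legalize to an addq/adcq
+; (add-with-carry) pair: the <2 x i128> add contributes two pairs and the
+; reduction add presumably one more once the dead lane is eliminated, giving
+; the three pairs checked per configuration below.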
+define i128 @split128(<2 x i128> %a, <2 x i128> %b) {
+; SSE4-LABEL: split128:
+; SSE4: addq
+; SSE4: adcq
+; SSE4: addq
+; SSE4: adcq
+; SSE4: addq
+; SSE4: adcq
+; SSE4: ret
+; AVX1-LABEL: split128:
+; AVX1: addq
+; AVX1: adcq
+; AVX1: addq
+; AVX1: adcq
+; AVX1: addq
+; AVX1: adcq
+; AVX1: ret
+; AVX2-LABEL: split128:
+; AVX2: addq
+; AVX2: adcq
+; AVX2: addq
+; AVX2: adcq
+; AVX2: addq
+; AVX2: adcq
+; AVX2: ret
+ %add = add nsw <2 x i128> %a, %b
+ %rdx.shuf = shufflevector <2 x i128> %add, <2 x i128> undef, <2 x i32> <i32 undef, i32 0>
+ %bin.rdx = add <2 x i128> %add, %rdx.shuf
+ %e = extractelement <2 x i128> %bin.rdx, i32 1
+ ret i128 %e
+}
diff --git a/test/CodeGen/X86/vector-gep.ll b/test/CodeGen/X86/vector-gep.ll
index 9c68f44..3f7ee3a 100644
--- a/test/CodeGen/X86/vector-gep.ll
+++ b/test/CodeGen/X86/vector-gep.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux -mcpu=corei7-avx | FileCheck %s
; RUN: opt -instsimplify -disable-output < %s
;CHECK-LABEL: AGEP0:
diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll
index 4c30184..b6d43e9 100644
--- a/test/CodeGen/X86/vector-idiv.ll
+++ b/test/CodeGen/X86/vector-idiv.ll
@@ -8,7 +8,7 @@ define <4 x i32> @test1(<4 x i32> %a) {
; SSE41-LABEL: test1:
; SSE41: pmuludq
-; SSE41: pshufd $57
+; SSE41: pshufd $49
; SSE41: pmuludq
; SSE41: shufps $-35
; SSE41: psubd
@@ -18,7 +18,7 @@ define <4 x i32> @test1(<4 x i32> %a) {
; AVX-LABEL: test1:
; AVX: vpmuludq
-; AVX: vpshufd $57
+; AVX: vpshufd $49
; AVX: vpmuludq
; AVX: vshufps $-35
; AVX: vpsubd
@@ -32,11 +32,11 @@ define <8 x i32> @test2(<8 x i32> %a) {
ret <8 x i32> %div
; AVX-LABEL: test2:
-; AVX: vpermd
+; AVX: vpbroadcastd
+; AVX: vpalignr $4
; AVX: vpmuludq
-; AVX: vshufps $-35
; AVX: vpmuludq
-; AVX: vshufps $-35
+; AVX: vpblendd $170
; AVX: vpsubd
; AVX: vpsrld $1
; AVX: vpadd
@@ -107,6 +107,12 @@ define <16 x i16> @test6(<16 x i16> %a) {
define <16 x i8> @test7(<16 x i8> %a) {
%div = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <16 x i8> %div
+
+; FIXME: scalarized
+; SSE41-LABEL: test7:
+; SSE41: pext
+; AVX-LABEL: test7:
+; AVX: pext
}
define <4 x i32> @test8(<4 x i32> %a) {
@@ -115,8 +121,8 @@ define <4 x i32> @test8(<4 x i32> %a) {
; SSE41-LABEL: test8:
; SSE41: pmuldq
-; SSE41: pshufd $57
-; SSE41-NOT: pshufd $57
+; SSE41: pshufd $49
+; SSE41-NOT: pshufd $49
; SSE41: pmuldq
; SSE41: shufps $-35
; SSE41: pshufd $-40
@@ -130,8 +136,8 @@ define <4 x i32> @test8(<4 x i32> %a) {
; SSE: pand
; SSE: paddd
; SSE: pmuludq
-; SSE: pshufd $57
-; SSE-NOT: pshufd $57
+; SSE: pshufd $49
+; SSE-NOT: pshufd $49
; SSE: pmuludq
; SSE: shufps $-35
; SSE: pshufd $-40
@@ -143,8 +149,8 @@ define <4 x i32> @test8(<4 x i32> %a) {
; AVX-LABEL: test8:
; AVX: vpmuldq
-; AVX: vpshufd $57
-; AVX-NOT: vpshufd $57
+; AVX: vpshufd $49
+; AVX-NOT: vpshufd $49
; AVX: vpmuldq
; AVX: vshufps $-35
; AVX: vpshufd $-40
@@ -159,12 +165,11 @@ define <8 x i32> @test9(<8 x i32> %a) {
ret <8 x i32> %div
; AVX-LABEL: test9:
+; AVX: vpalignr $4
; AVX: vpbroadcastd
; AVX: vpmuldq
-; AVX: vshufps $-35
; AVX: vpmuldq
-; AVX: vshufps $-35
-; AVX: vpshufd $-40
+; AVX: vpblendd $170
; AVX: vpadd
; AVX: vpsrld $31
; AVX: vpsrad $2
@@ -177,10 +182,10 @@ define <8 x i32> @test10(<8 x i32> %a) {
; AVX-LABEL: test10:
; AVX: vpbroadcastd
+; AVX: vpalignr $4
; AVX: vpmuludq
-; AVX: vshufps $-35
; AVX: vpmuludq
-; AVX: vshufps $-35
+; AVX: vpblendd $170
; AVX: vpsubd
; AVX: vpsrld $1
; AVX: vpadd
@@ -193,12 +198,11 @@ define <8 x i32> @test11(<8 x i32> %a) {
ret <8 x i32> %rem
; AVX-LABEL: test11:
+; AVX: vpalignr $4
; AVX: vpbroadcastd
; AVX: vpmuldq
-; AVX: vshufps $-35
; AVX: vpmuldq
-; AVX: vshufps $-35
-; AVX: vpshufd $-40
+; AVX: vpblendd $170
; AVX: vpadd
; AVX: vpsrld $31
; AVX: vpsrad $2
diff --git a/test/CodeGen/X86/vector-shuffle-128-v16.ll b/test/CodeGen/X86/vector-shuffle-128-v16.ll
new file mode 100644
index 0000000..4da7e42
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -0,0 +1,196 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,5,5,5]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,6,6,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: punpcklwd %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 5, i32 5, i32 5, i32 5, i32 6, i32 6, i32 6, i32 6, i32 7, i32 7, i32 7, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,6,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_0101010101010101(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_0101010101010101
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23
+; CHECK-SSE2: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 16, i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT: punpckhbw %xmm1, %xmm2
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: packuswb %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20
+; CHECK-SSE2: pxor %xmm2, %xmm2
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: packuswb %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20
+; CHECK-SSE2: pxor %xmm2, %xmm2
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm3
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm3
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm4
+; CHECK-SSE2-NEXT: punpckhbw %xmm2, %xmm4
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: shufpd {{.*}} # xmm4 = xmm4[0],xmm3[1]
+; CHECK-SSE2-NEXT: punpckhbw %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; CHECK-SSE2-NEXT: packuswb %xmm4, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 31, i32 30, i32 29, i32 28, i32 11, i32 10, i32 9, i32 8, i32 23, i32 22, i32 21, i32 20>
+ ret <16 x i8> %shuffle
+}
+
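+; Interleaving the low eight bytes of %a with bytes taken from
+; zeroinitializer is the shuffle form of zero-extending <8 x i8> to <8 x i16>,
+; so a single punpcklbw against a zeroed register suffices.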
+define <16 x i8> @zext_to_v8i16_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @zext_to_v8i16_shuffle
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 1, i32 19, i32 2, i32 21, i32 3, i32 23, i32 4, i32 25, i32 5, i32 27, i32 6, i32 29, i32 7, i32 31>
+ ret <16 x i8> %shuffle
+}
+
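+; Placing each of the low four bytes of %a into its own zeroed 32-bit lane is
+; the shuffle form of a <4 x i8> to <4 x i32> zero extension: two successive
+; unpacks against a zeroed register widen bytes to words and then to dwords.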
+define <16 x i8> @zext_to_v4i32_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @zext_to_v4i32_shuffle
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
+ ret <16 x i8> %shuffle
+}
+
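+; Selecting bytes 0, 4, 8 and 12 is a truncation of <4 x i32> to <4 x i8>, so
+; masking off the high bytes and packing the result down is sufficient.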
+define <16 x i8> @trunc_v4i32_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @trunc_v4i32_shuffle
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pand
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: packuswb %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <16 x i8> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll
new file mode 100644
index 0000000..78b4ee7
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -0,0 +1,219 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <2 x i64> @shuffle_v2i64_00(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_00
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_10(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_10
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_11(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_11
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_22(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_22
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_32(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_32
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_33(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_33
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 3>
+ ret <2 x i64> %shuffle
+}
+
+define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_00
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_10(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_10
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 0>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_11(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_11
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 1>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
+; FIXME: Should these use movapd + shufpd to remove a domain change at the cost
+; of a mov?
+;
+; CHECK-SSE2-LABEL: @shuffle_v2f64_22
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 2>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_32(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_32
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 2>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_33(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_33
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 3>
+ ret <2 x double> %shuffle
+}
+
+define <2 x i64> @shuffle_v2i64_02(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_02
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle
+}
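+; The "_copy" variants take an unused leading argument so that %a and %b
+; arrive in %xmm1 and %xmm2, checking that lowering copes when the inputs are
+; not already in %xmm0.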
+define <2 x i64> @shuffle_v2i64_02_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_02_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm2[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_03
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_03_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm2[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_12
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_12_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_12_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm2[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_13(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_13
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_13_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_13_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm2[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_20(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_20
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_20_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_20_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[0],xmm1[0]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_21
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_21_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[0],xmm1[1]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_30(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_30
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_30_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_30_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[1],xmm1[0]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_31(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_31
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm0[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_31_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_31_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[1],xmm1[1]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
+ ret <2 x i64> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v4.ll b/test/CodeGen/X86/vector-shuffle-128-v4.ll
new file mode 100644
index 0000000..7d496fa
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -0,0 +1,170 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @shuffle_v4i32_0001(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0001
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,0,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0020(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0020
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,0,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0300(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0300
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,3,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_1000(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_1000
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[1,0,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_2200(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_2200
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,2,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_3330(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_3330
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[3,3,3,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_3210(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_3210
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %shuffle
+}
+
+define <4 x float> @shuffle_v4f32_0001(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0001
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,0,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_0020(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0020
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,0,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_0300(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0300
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,3,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_1000(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_1000
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[1,0,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_2200(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_2200
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[2,2,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_3330(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_3330
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[3,3,3,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_3210(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_3210
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x float> %shuffle
+}
+
+define <4 x i32> @shuffle_v4i32_0124(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0124
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[2,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0142(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0142
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[2,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0412(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0412
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[0,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm1 = xmm1[2,0],xmm0[1,2]
+; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4012(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4012
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[0,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm1 = xmm1[0,2],xmm0[1,2]
+; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0145(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0145
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0451(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0451
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,1]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2,3,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 5, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4501(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4501
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4015(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4015
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,1]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[2,0,1,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 5>
+ ret <4 x i32> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v8.ll b/test/CodeGen/X86/vector-shuffle-128-v8.ll
new file mode 100644
index 0000000..5d1922a
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -0,0 +1,493 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
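+; Function names encode the shuffle mask as one hex digit per result element,
+; with 'X' standing for an undef element and digits 8-f selecting from %b.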
+define <8 x i16> @shuffle_v8i16_01012323(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_01012323
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,0,1,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_67452301(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_67452301
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_456789AB(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_456789AB
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_00000000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00000000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_00004444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00004444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_31206745(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_31206745
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,3,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 6, i32 7, i32 4, i32 5>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44440000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44440000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_75643120(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_75643120
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,6,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 7, i32 5, i32 6, i32 4, i32 3, i32 1, i32 2, i32 0>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_10545410(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_10545410
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,0,3,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,4,7,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 5, i32 4, i32 5, i32 4, i32 1, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_54105410(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_54105410
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,4,7,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 4, i32 1, i32 0, i32 5, i32 4, i32 1, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_54101054(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_54101054
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 4, i32 1, i32 0, i32 1, i32 0, i32 5, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04400440(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04400440
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,4,4,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 0, i32 4, i32 4, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_40044004(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_40044004
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,0,0,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 0, i32 0, i32 4, i32 4, i32 0, i32 0, i32 4>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_26405173(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_26405173
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,3,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,6,4,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 6, i32 4, i32 0, i32 5, i32 1, i32 7, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_20645173(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_20645173
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,0,3,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,6,4,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 0, i32 6, i32 4, i32 5, i32 1, i32 7, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_26401375(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_26401375
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,1,2]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,3,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 6, i32 4, i32 0, i32 1, i32 3, i32 7, i32 5>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_00444444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00444444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44004444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44004444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,2,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04404444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04404444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04400000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04400000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04404567(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04404567
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0X444444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0X444444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 undef, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44X04444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44X04444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 undef, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_X4404444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_X4404444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0127XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0127XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_XXXX4563(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXX4563
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 5, i32 6, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_4563XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_4563XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,0,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_01274563(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_01274563
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,5,4,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,1,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_45630127(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_45630127
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,1,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,0,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,7,5,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 3, i32 0, i32 1, i32 2, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_08192a3b(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_08192a3b
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0c1d2e3f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0c1d2e3f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 12, i32 1, i32 13, i32 2, i32 14, i32 3, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_4c5d6e7f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_4c5d6e7f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_48596a7b(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_48596a7b
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 8, i32 5, i32 9, i32 6, i32 10, i32 7, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_08196e7f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_08196e7f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[0,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0c1d6879(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0c1d6879
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,0,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 12, i32 1, i32 13, i32 6, i32 8, i32 7, i32 9>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_109832ba(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_109832ba
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm0[2,0,3,1,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,0,3,1,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 9, i32 8, i32 3, i32 2, i32 11, i32 10>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_8091a2b3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_8091a2b3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_c4d5e6f7(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_c4d5e6f7
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm2 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0213cedf(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0213cedf
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 12, i32 14, i32 13, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_032dXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_032dXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,1,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 3, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_XXXcXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXcXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_012dXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_012dXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,2,0,3,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_XXXXcde3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXXcde3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_cde3XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_cde3XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 12, i32 13, i32 14, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_012dcde3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_012dcde3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm2 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm3 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpckhwd %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[0,2,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm3, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,2,0,3,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 12, i32 13, i32 14, i32 3>
+ ret <8 x i16> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
new file mode 100644
index 0000000..e60ecb7
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -0,0 +1,119 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8)
+declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8)
+declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8)
+
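+; Immediate 27 is 0b00011011, i.e. the mask [3,2,1,0]; applying this element
+; reversal twice is the identity, so both pshufd calls should fold away.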
+define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 27)
+ ret <4 x i32> %c
+}
+
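+; Immediate -28 is 0xe4 = [0,1,2,3], the identity pshuflw, so the chain
+; reduces to two mutually-inverse pshufd calls and should fold to nothing.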
+define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd2
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 -28)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 -28)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd4(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd4
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -31)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 27)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -31)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd5
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -76)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 27)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -76)
+ ret <4 x i32> %d
+}
+
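+; Broadcasting element 0 (immediate 0) makes the second shuffle's mask
+; irrelevant: every lane it reads already holds element 0, so the two
+; shuffles should combine into a single pshufd $0.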
+define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd6
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd $0
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 0)
+ %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 8)
+ ret <4 x i32> %c
+}
+
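+; pshuflw with immediate 27 reverses the low four words; two applications are
+; the identity on the whole vector, so no shuffle should be emitted.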
+define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
+ ret <8 x i16> %c
+}
+
+define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw2
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 -28)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
+define <8 x i16> @combine_pshuflw3(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 27)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
+define <8 x i16> @combine_pshufhw1(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufhw1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
diff --git a/test/CodeGen/X86/vselect.ll b/test/CodeGen/X86/vselect.ll
index 0cf03fc..42cf06a 100644
--- a/test/CodeGen/X86/vselect.ll
+++ b/test/CodeGen/X86/vselect.ll
@@ -262,3 +262,17 @@ define <2 x i64> @test25(<2 x i64> %a, <2 x i64> %b) {
; CHECK: movsd
; CHECK: ret
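+; Each select with mask <false,false,true,true> merges the low half of one
+; shuffle with the high half of the other, which is exactly a movlhps
+; concatenation of the two <2 x float> inputs.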
+define <4 x float> @select_of_shuffles_0(<2 x float> %a0, <2 x float> %b0, <2 x float> %a1, <2 x float> %b1) {
+; CHECK-LABEL: select_of_shuffles_0
+; CHECK-DAG: movlhps %xmm2, [[REGA:%xmm[0-9]+]]
+; CHECK-DAG: movlhps %xmm3, [[REGB:%xmm[0-9]+]]
+; CHECK: subps [[REGB]], [[REGA]]
+ %1 = shufflevector <2 x float> %a0, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %2 = shufflevector <2 x float> %a1, <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ %3 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %2, <4 x float> %1
+ %4 = shufflevector <2 x float> %b0, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %5 = shufflevector <2 x float> %b1, <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ %6 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %5, <4 x float> %4
+ %7 = fsub <4 x float> %3, %6
+ ret <4 x float> %7
+}
diff --git a/test/CodeGen/X86/widen_cast-4.ll b/test/CodeGen/X86/widen_cast-4.ll
index 1bc06a7..19b84f1 100644
--- a/test/CodeGen/X86/widen_cast-4.ll
+++ b/test/CodeGen/X86/widen_cast-4.ll
@@ -1,8 +1,9 @@
; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s
-; CHECK: psraw
-; CHECK: psraw
+; RUN: llc < %s -march=x86 -mattr=+sse4.2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
+; CHECK-LABEL: update:
+; CHECK-WIDE-LABEL: update:
entry:
%dst_i.addr = alloca i64* ; <i64**> [#uses=2]
%src_i.addr = alloca i64* ; <i64**> [#uses=2]
@@ -44,6 +45,26 @@ forbody: ; preds = %forcond
%shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 > ; <<8 x i8>> [#uses=1]
store <8 x i8> %shr, <8 x i8>* %arrayidx10
br label %forinc
+; CHECK: %forbody
+; CHECK: pmovzxbw
+; CHECK-NEXT: paddw
+; CHECK-NEXT: psllw $8
+; CHECK-NEXT: psraw $8
+; CHECK-NEXT: psraw $2
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: movlpd
+;
+; FIXME: We shouldn't require both a movd and an insert.
+; CHECK-WIDE: %forbody
+; CHECK-WIDE: movd
+; CHECK-WIDE-NEXT: pinsrd
+; CHECK-WIDE-NEXT: paddb
+; CHECK-WIDE-NEXT: psrlw $2
+; CHECK-WIDE-NEXT: pand
+; CHECK-WIDE-NEXT: pxor
+; CHECK-WIDE-NEXT: psubb
+; CHECK-WIDE-NEXT: pextrd
+; CHECK-WIDE-NEXT: movd
forinc: ; preds = %forbody
%tmp15 = load i32* %i ; <i32> [#uses=1]
diff --git a/test/CodeGen/X86/widen_cast-6.ll b/test/CodeGen/X86/widen_cast-6.ll
index 7c06ad8..46d8dd7 100644
--- a/test/CodeGen/X86/widen_cast-6.ll
+++ b/test/CodeGen/X86/widen_cast-6.ll
@@ -1,9 +1,13 @@
; RUN: llc < %s -march=x86 -mattr=+sse4.1 | FileCheck %s
-; CHECK: movd
; Test bit convert that requires widening in the operand.
define i32 @return_v2hi() nounwind {
+; CHECK-LABEL: @return_v2hi
+; CHECK: pushl
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popl
+; CHECK-NEXT: ret
entry:
%retval12 = bitcast <2 x i16> zeroinitializer to i32 ; <i32> [#uses=1]
ret i32 %retval12
diff --git a/test/CodeGen/X86/widen_conversions.ll b/test/CodeGen/X86/widen_conversions.ll
new file mode 100644
index 0000000..522ab47
--- /dev/null
+++ b/test/CodeGen/X86/widen_conversions.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mcpu=x86-64 -x86-experimental-vector-widening-legalization -x86-experimental-vector-shuffle-lowering | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @zext_v4i8_to_v4i32(<4 x i8>* %ptr) {
+; CHECK-LABEL: zext_v4i8_to_v4i32:
+;
+; CHECK: movd (%{{.*}}), %[[X:xmm[0-9]+]]
+; CHECK-NEXT: pxor %[[Z:xmm[0-9]+]], %[[Z]]
+; CHECK-NEXT: punpcklbw %[[Z]], %[[X]]
+; CHECK-NEXT: punpcklbw %[[Z]], %[[X]]
+; CHECK-NEXT: ret
+
+ %val = load <4 x i8>* %ptr
+ %ext = zext <4 x i8> %val to <4 x i32>
+ ret <4 x i32> %ext
+}
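+
+; Each punpcklbw against the zeroed register interleaves a zero byte after
+; every data byte, so doing it twice leaves each source byte followed by
+; three zeros, which is exactly the <4 x i8> zero-extended to <4 x i32>.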
diff --git a/test/CodeGen/X86/widen_shuffle-1.ll b/test/CodeGen/X86/widen_shuffle-1.ll
index 803402b..a355b75 100644
--- a/test/CodeGen/X86/widen_shuffle-1.ll
+++ b/test/CodeGen/X86/widen_shuffle-1.ll
@@ -33,7 +33,9 @@ entry:
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
entry:
; CHECK-LABEL: shuf3:
-; CHECK: shufps
+; CHECK-NOT: movlhps
+; CHECK-NOT: shufps
+; CHECK: pshufd
%shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
%tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
%tmp1.i.i = shufflevector <3 x float> %tmp25.i.i, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/test/CodeGen/X86/win64_eh.ll b/test/CodeGen/X86/win64_eh.ll
new file mode 100644
index 0000000..f1f874e
--- /dev/null
+++ b/test/CodeGen/X86/win64_eh.ll
@@ -0,0 +1,170 @@
+; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-win32 | FileCheck %s -check-prefix=WIN64
+; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=WIN64
+
+; Check a function without a prologue
+define void @foo0() uwtable {
+entry:
+ ret void
+}
+; WIN64-LABEL: foo0:
+; WIN64: .seh_proc foo0
+; WIN64: .seh_endprologue
+; WIN64: ret
+; WIN64: .seh_endproc
+
+; Check a small stack allocation
+define void @foo1() uwtable {
+entry:
+ %baz = alloca [2000 x i16], align 2
+ ret void
+}
+; WIN64-LABEL: foo1:
+; WIN64: .seh_proc foo1
+; WIN64: subq $4000, %rsp
+; WIN64: .seh_stackalloc 4000
+; WIN64: .seh_endprologue
+; WIN64: addq $4000, %rsp
+; WIN64: ret
+; WIN64: .seh_endproc
+
+; Check a stack allocation that requires a call to __chkstk/___chkstk_ms
+define void @foo2() uwtable {
+entry:
+ %baz = alloca [4000 x i16], align 2
+ ret void
+}
+; WIN64-LABEL: foo2:
+; WIN64: .seh_proc foo2
+; WIN64: movabsq $8000, %rax
+; WIN64: callq {{__chkstk|___chkstk_ms}}
+; WIN64: subq %rax, %rsp
+; WIN64: .seh_stackalloc 8000
+; WIN64: .seh_endprologue
+; WIN64: addq $8000, %rsp
+; WIN64: ret
+; WIN64: .seh_endproc
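+;
+; Win64 requires the stack to be probed page-by-page once a frame allocates
+; more than a page (4096 bytes), so the 8000-byte allocation above goes
+; through __chkstk (or mingw's ___chkstk_ms) with the size passed in %rax.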
+
+
+; Check a stack push
+define i32 @foo3(i32 %f_arg, i32 %e_arg, i32 %d_arg, i32 %c_arg, i32 %b_arg, i32 %a_arg) uwtable {
+entry:
+ %a = alloca i32
+ %b = alloca i32
+ %c = alloca i32
+ %d = alloca i32
+ %e = alloca i32
+ %f = alloca i32
+ store i32 %a_arg, i32* %a
+ store i32 %b_arg, i32* %b
+ store i32 %c_arg, i32* %c
+ store i32 %d_arg, i32* %d
+ store i32 %e_arg, i32* %e
+ store i32 %f_arg, i32* %f
+ %tmp = load i32* %a
+ %tmp1 = mul i32 %tmp, 2
+ %tmp2 = load i32* %b
+ %tmp3 = mul i32 %tmp2, 3
+ %tmp4 = add i32 %tmp1, %tmp3
+ %tmp5 = load i32* %c
+ %tmp6 = mul i32 %tmp5, 5
+ %tmp7 = add i32 %tmp4, %tmp6
+ %tmp8 = load i32* %d
+ %tmp9 = mul i32 %tmp8, 7
+ %tmp10 = add i32 %tmp7, %tmp9
+ %tmp11 = load i32* %e
+ %tmp12 = mul i32 %tmp11, 11
+ %tmp13 = add i32 %tmp10, %tmp12
+ %tmp14 = load i32* %f
+ %tmp15 = mul i32 %tmp14, 13
+ %tmp16 = add i32 %tmp13, %tmp15
+ ret i32 %tmp16
+}
+; WIN64-LABEL: foo3:
+; WIN64: .seh_proc foo3
+; WIN64: pushq %rsi
+; WIN64: .seh_pushreg 6
+; WIN64: subq $24, %rsp
+; WIN64: .seh_stackalloc 24
+; WIN64: .seh_endprologue
+; WIN64: addq $24, %rsp
+; WIN64: popq %rsi
+; WIN64: ret
+; WIN64: .seh_endproc
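+;
+; .seh_pushreg takes the Win64 unwind register number rather than a register
+; name: 6 is %rsi here, and 5 (%rbp), 7 (%rdi) and 3 (%rbx) appear in foo5
+; below.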
+
+
+; Check emission of eh handler and handler data
+declare i32 @_d_eh_personality(i32, i32, i64, i8*, i8*)
+declare void @_d_eh_resume_unwind(i8*)
+
+declare i32 @bar()
+
+define i32 @foo4() #0 {
+entry:
+ %step = alloca i32, align 4
+ store i32 0, i32* %step
+ %tmp = load i32* %step
+
+ %tmp1 = invoke i32 @bar()
+ to label %finally unwind label %landingpad
+
+finally:
+ store i32 1, i32* %step
+ br label %endtryfinally
+
+landingpad:
+ %landing_pad = landingpad { i8*, i32 } personality i32 (i32, i32, i64, i8*, i8*)* @_d_eh_personality
+ cleanup
+ %tmp3 = extractvalue { i8*, i32 } %landing_pad, 0
+ store i32 2, i32* %step
+ call void @_d_eh_resume_unwind(i8* %tmp3)
+ unreachable
+
+endtryfinally:
+ %tmp10 = load i32* %step
+ ret i32 %tmp10
+}
+; WIN64-LABEL: foo4:
+; WIN64: .seh_proc foo4
+; WIN64: .seh_handler _d_eh_personality, @unwind, @except
+; WIN64: subq $56, %rsp
+; WIN64: .seh_stackalloc 56
+; WIN64: .seh_endprologue
+; WIN64: addq $56, %rsp
+; WIN64: ret
+; WIN64: .seh_handlerdata
+; WIN64: .seh_endproc
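+;
+; .seh_handler registers _d_eh_personality as both an unwind (@unwind) and an
+; exception (@except) handler, and .seh_handlerdata marks the point where the
+; function's language-specific handler data is emitted into the unwind info.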
+
+
+; Check stack re-alignment and xmm spilling
+define void @foo5() uwtable {
+entry:
+ %s = alloca i32, align 64
+ call void asm sideeffect "", "~{rbx},~{rdi},~{xmm6},~{xmm7}"()
+ ret void
+}
+; WIN64-LABEL: foo5:
+; WIN64: .seh_proc foo5
+; WIN64: pushq %rbp
+; WIN64: .seh_pushreg 5
+; WIN64: movq %rsp, %rbp
+; WIN64: pushq %rdi
+; WIN64: .seh_pushreg 7
+; WIN64: pushq %rbx
+; WIN64: .seh_pushreg 3
+; WIN64: andq $-64, %rsp
+; WIN64: subq $128, %rsp
+; WIN64: .seh_stackalloc 48
+; WIN64: .seh_setframe 5, 64
+; WIN64: movaps %xmm7, -32(%rbp) # 16-byte Spill
+; WIN64: movaps %xmm6, -48(%rbp) # 16-byte Spill
+; WIN64: .seh_savexmm 6, 16
+; WIN64: .seh_savexmm 7, 32
+; WIN64: .seh_endprologue
+; WIN64: movaps -48(%rbp), %xmm6 # 16-byte Reload
+; WIN64: movaps -32(%rbp), %xmm7 # 16-byte Reload
+; WIN64: leaq -16(%rbp), %rsp
+; WIN64: popq %rbx
+; WIN64: popq %rdi
+; WIN64: popq %rbp
+; WIN64: retq
+; WIN64: .seh_endproc
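+;
+; The 64-byte-aligned local forces dynamic re-alignment: %rbp is set up as a
+; frame pointer (.seh_setframe), %rsp is rounded down with andq $-64, and the
+; clobbered non-volatile %xmm6/%xmm7 are spilled at fixed frame offsets that
+; .seh_savexmm records for the unwinder.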
diff --git a/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll b/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
index 5d7a10b..08d0257 100644
--- a/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
+++ b/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
@@ -3,7 +3,7 @@
; clang -Oz -c test1.cpp -emit-llvm -S -o
; Verify that we generate the shld instruction when we are optimizing for size,
; even for X86_64 processors that are known to have poor latency double
-; precision shift instuctions.
+; precision shift instructions.
; uint64_t lshift10(uint64_t a, uint64_t b)
; {
; return (a << 10) | (b >> 54);
@@ -25,7 +25,7 @@ attributes #0 = { minsize nounwind optsize readnone uwtable "less-precise-fpmad"
; clang -Os -c test2.cpp -emit-llvm -S
; Verify that we generate the shld instruction when we are optimizing for size,
; even for X86_64 processors that are known to have poor latency double
-; precision shift instuctions.
+; precision shift instructions.
; uint64_t lshift11(uint64_t a, uint64_t b)
; {
; return (a << 11) | (b >> 53);
@@ -46,7 +46,7 @@ attributes #1 = { nounwind optsize readnone uwtable "less-precise-fpmad"="false"
; clang -O2 -c test2.cpp -emit-llvm -S
; Verify that we do not generate the shld instruction when we are not optimizing
; for size for X86_64 processors that are known to have poor latency double
-; precision shift instuctions.
+; precision shift instructions.
; uint64_t lshift12(uint64_t a, uint64_t b)
; {
; return (a << 12) | (b >> 52);
diff --git a/test/CodeGen/X86/x86-64-frameaddr.ll b/test/CodeGen/X86/x86-64-frameaddr.ll
deleted file mode 100644
index 7d36a7a..0000000
--- a/test/CodeGen/X86/x86-64-frameaddr.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; CHECK: stack_end_address
-; CHECK: {{movq.+rbp.*$}}
-; CHECK: {{movq.+rbp.*$}}
-; CHECK: ret
-
-define i64* @stack_end_address() nounwind {
-entry:
- tail call i8* @llvm.frameaddress( i32 0 )
- bitcast i8* %0 to i64*
- ret i64* %1
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-64-static-relo-movl.ll b/test/CodeGen/X86/x86-64-static-relo-movl.ll
new file mode 100644
index 0000000..71e52bb
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-static-relo-movl.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=x86_64-pc-win32-macho -relocation-model=static -O0 < %s | FileCheck %s
+
+; Ensure that we generate a movabsq for the static relocation, rather than a
+; movl or a lea, when compiling for 64 bit.
+
+%struct.MatchInfo = type [64 x i64]
+
+@NO_MATCH = internal constant %struct.MatchInfo zeroinitializer, align 8
+
+define void @setup() {
+ %pending = alloca %struct.MatchInfo, align 8
+ %t = bitcast %struct.MatchInfo* %pending to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
+ %u = getelementptr inbounds %struct.MatchInfo* %pending, i32 0, i32 2
+ %v = load i64* %u, align 8
+ br label %done
+done:
+ ret void
+
+ ; CHECK: movabsq $_NO_MATCH, {{.*}}
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
diff --git a/test/CodeGen/X86/x86-frameaddr.ll b/test/CodeGen/X86/x86-frameaddr.ll
deleted file mode 100644
index d595874..0000000
--- a/test/CodeGen/X86/x86-frameaddr.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | grep ebp
-
-define i8* @t() nounwind {
-entry:
- %0 = tail call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-frameaddr2.ll b/test/CodeGen/X86/x86-frameaddr2.ll
deleted file mode 100644
index c509115..0000000
--- a/test/CodeGen/X86/x86-frameaddr2.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 3
-
-define i8* @t() nounwind {
-entry:
- %0 = tail call i8* @llvm.frameaddress(i32 2)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll b/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
new file mode 100644
index 0000000..d885f1c
--- /dev/null
+++ b/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mattr=+avx < %s | FileCheck %s
+
+; Check that we properly upgrade the AVX vbroadcast intrinsics to IR. The
+; expectation is that we should still get back the original instruction that
+; the intrinsic maps to.
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; CHECK-LABEL: test_mm_broadcast_ss:
+define <4 x float> @test_mm_broadcast_ss(float* readonly %__a){
+entry:
+ %0 = bitcast float* %__a to i8*
+; CHECK: vbroadcastss (%{{.*}}), %xmm
+ %1 = tail call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %0)
+ ret <4 x float> %1
+}
+
+; CHECK-LABEL: test_mm256_broadcast_sd:
+define <4 x double> @test_mm256_broadcast_sd(double* readonly %__a) {
+entry:
+ %0 = bitcast double* %__a to i8*
+; CHECK: vbroadcastsd (%{{.*}}), %ymm
+ %1 = tail call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %0)
+ ret <4 x double> %1
+}
+
+; CHECK-LABEL: test_mm256_broadcast_ss:
+define <8 x float> @test_mm256_broadcast_ss(float* readonly %__a) {
+entry:
+ %0 = bitcast float* %__a to i8*
+; CHECK: vbroadcastss (%{{.*}}), %ymm
+ %1 = tail call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %0)
+ ret <8 x float> %1
+}
+
+declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*)
+
+declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*)
+
+declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*)
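+
+; The declarations above use the old intrinsic names on purpose: the
+; auto-upgrader is expected to rewrite each call into a scalar load plus a
+; splat shufflevector, which instruction selection then turns back into
+; vbroadcastss/vbroadcastsd.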
diff --git a/test/CodeGen/X86/xaluo.ll b/test/CodeGen/X86/xaluo.ll
new file mode 100644
index 0000000..f078631
--- /dev/null
+++ b/test/CodeGen/X86/xaluo.ll
@@ -0,0 +1,743 @@
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=DAG
+; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort < %s | FileCheck %s --check-prefix=FAST
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort < %s | FileCheck %s
+
+;
+; Get the actual value of the overflow bit.
+;
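+; Signed overflow is reported in OF and unsigned overflow (carry/borrow) in
+; CF, so each *.with.overflow intrinsic below should lower to the plain ALU
+; instruction followed by seto or setb.
+;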
+; SADDO reg, reg
+define zeroext i1 @saddo.i8(i8 signext %v1, i8 signext %v2, i8* %res) {
+entry:
+; DAG-LABEL: saddo.i8
+; DAG: addb %sil, %dil
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i8
+; FAST: addb %sil, %dil
+; FAST-NEXT: seto %al
+ %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: saddo.i16
+; DAG: addw %si, %di
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i16
+; FAST: addw %si, %di
+; FAST-NEXT: seto %al
+ %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: saddo.i32
+; DAG: addl %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i32
+; FAST: addl %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64
+; DAG: addq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64
+; FAST: addq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SADDO reg, imm | imm, reg
+; FIXME: INC isn't supported in FastISel yet
+define zeroext i1 @saddo.i64imm1(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm1
+; DAG: incq %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64imm1
+; FAST: addq $1, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 1)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; FIXME: DAG doesn't optimize immediates on the LHS.
+define zeroext i1 @saddo.i64imm2(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm2
+; DAG: mov
+; DAG-NEXT: addq
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm2
+; FAST: addq $1, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 1, i64 %v1)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; Check boundary conditions for large immediates.
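+; The add instruction only folds a sign-extended 32-bit immediate, so values
+; in [-2147483648, 2147483647] are encoded directly and anything outside that
+; range has to be materialized into a register first.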
+define zeroext i1 @saddo.i64imm3(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm3
+; DAG: addq $-2147483648, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64imm3
+; FAST: addq $-2147483648, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -2147483648)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64imm4(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm4
+; DAG: movabsq $-21474836489, %[[REG:[a-z]+]]
+; DAG-NEXT: addq %rdi, %[[REG]]
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm4
+; FAST: movabsq $-21474836489, %[[REG:[a-z]+]]
+; FAST-NEXT: addq %rdi, %[[REG]]
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -21474836489)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64imm5(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm5
+; DAG: addq $2147483647, %rdi
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm5
+; FAST: addq $2147483647, %rdi
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483647)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; TODO: FastISel shouldn't use movabsq.
+define zeroext i1 @saddo.i64imm6(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm6
+; DAG: movl $2147483648, %ecx
+; DAG: addq %rdi, %rcx
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm6
+; FAST: movabsq $2147483648, %[[REG:[a-z]+]]
+; FAST: addq %rdi, %[[REG]]
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483648)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; UADDO
+define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: uaddo.i32
+; DAG: addl %esi, %edi
+; DAG-NEXT: setb %al
+; FAST-LABEL: uaddo.i32
+; FAST: addl %esi, %edi
+; FAST-NEXT: setb %al
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: uaddo.i64
+; DAG: addq %rsi, %rdi
+; DAG-NEXT: setb %al
+; FAST-LABEL: uaddo.i64
+; FAST: addq %rsi, %rdi
+; FAST-NEXT: setb %al
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SSUBO
+define zeroext i1 @ssubo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: ssubo.i32
+; DAG: subl %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: ssubo.i32
+; FAST: subl %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: ssubo.i64
+; DAG: subq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: ssubo.i64
+; FAST: subq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; USUBO
+define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: usubo.i32
+; DAG: subl %esi, %edi
+; DAG-NEXT: setb %al
+; FAST-LABEL: usubo.i32
+; FAST: subl %esi, %edi
+; FAST-NEXT: setb %al
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: usubo.i64
+; DAG: subq %rsi, %rdi
+; DAG-NEXT: setb %al
+; FAST-LABEL: usubo.i64
+; FAST: subq %rsi, %rdi
+; FAST-NEXT: setb %al
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SMULO
+define zeroext i1 @smulo.i8(i8 %v1, i8 %v2, i8* %res) {
+entry:
+; FAST-LABEL: smulo.i8
+; FAST: movb %dil, %al
+; FAST-NEXT: imulb %sil
+; FAST-NEXT: seto %cl
+ %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
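+
+; There is no two-operand form for an 8-bit multiply, so the i8 cases use the
+; one-operand imulb/mulb, which reads %al and writes %ax; only the FastISel
+; output is pinned down here.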
+
+define zeroext i1 @smulo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: smulo.i16
+; DAG: imulw %si, %di
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i16
+; FAST: imulw %si, %di
+; FAST-NEXT: seto %al
+ %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: smulo.i32
+; DAG: imull %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i32
+; FAST: imull %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: smulo.i64
+; DAG: imulq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i64
+; FAST: imulq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; UMULO
+define zeroext i1 @umulo.i8(i8 %v1, i8 %v2, i8* %res) {
+entry:
+; FAST-LABEL: umulo.i8
+; FAST: movb %dil, %al
+; FAST-NEXT: mulb %sil
+; FAST-NEXT: seto %cl
+ %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: umulo.i16
+; DAG: mulw %si
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i16
+; FAST: mulw %si
+; FAST-NEXT: seto
+ %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: umulo.i32
+; DAG: mull %esi
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i32
+; FAST: mull %esi
+; FAST-NEXT: seto
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: umulo.i64
+; DAG: mulq %rsi
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i64
+; FAST: mulq %rsi
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+;
+; Check the use of the overflow bit in combination with a select instruction.
+;
+define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: saddo.select.i32
+; CHECK: addl %esi, %eax
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: saddo.select.i64
+; CHECK: addq %rsi, %rax
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @uaddo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: uaddo.select.i32
+; CHECK: addl %esi, %eax
+; CHECK-NEXT: cmovbl %edi, %esi
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: uaddo.select.i64
+; CHECK: addq %rsi, %rax
+; CHECK-NEXT: cmovbq %rdi, %rsi
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: ssubo.select.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: ssubo.select.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @usubo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: usubo.select.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovbl %edi, %esi
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: usubo.select.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rdi, %rsi
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: smulo.select.i32
+; CHECK: imull %esi, %eax
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: smulo.select.i64
+; CHECK: imulq %rsi, %rax
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: umulo.select.i32
+; CHECK: mull %esi
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: umulo.select.i64
+; CHECK: mulq %rsi
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
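+
+; In the select variants the overflow bit never materializes into a register:
+; the flag set by the add/sub/mul is consumed directly by a cmovo/cmovb
+; conditional move, so no setcc or branch is needed.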
+
+
+;
+; Check the use of the overflow bit in combination with a branch instruction.
+;
+define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: saddo.br.i32
+; CHECK: addl %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: saddo.br.i64
+; CHECK: addq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: uaddo.br.i32
+; CHECK: addl %esi, %edi
+; CHECK-NEXT: jb
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: uaddo.br.i64
+; CHECK: addq %rsi, %rdi
+; CHECK-NEXT: jb
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: ssubo.br.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: ssubo.br.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: usubo.br.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jb
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: usubo.br.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jb
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: smulo.br.i32
+; CHECK: imull %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: smulo.br.i64
+; CHECK: imulq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: umulo.br.i32
+; CHECK: mull %esi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: umulo.br.i64
+; CHECK: mulq %rsi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+declare {i8, i1} @llvm.sadd.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.sadd.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i8, i1} @llvm.smul.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
+declare {i8, i1} @llvm.umul.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.umul.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
+
+!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
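+
+; !0 weights the overflow successors as (almost) never taken, so the hot path
+; falls through and each branch test reduces to a single forward jo/jb right
+; after the arithmetic.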
diff --git a/test/CodeGen/XCore/dwarf_debug.ll b/test/CodeGen/XCore/dwarf_debug.ll
new file mode 100644
index 0000000..2f4b231
--- /dev/null
+++ b/test/CodeGen/XCore/dwarf_debug.ll
@@ -0,0 +1,39 @@
+; RUN: llc < %s -mtriple=xcore-unknown-unknown -O0 | FileCheck %s
+
+; target datalayout = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32-f64:32-a:0:32-n32"
+; target triple = "xcore"
+
+; CHECK-LABEL: f
+; CHECK: entsp 2
+; ...the prologue...
+; CHECK: .loc 1 2 0 prologue_end # :2:0
+; CHECK: add r0, r0, 1
+; CHECK: retsp 2
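+;
+; The prologue_end flag on the .loc marks the first instruction of user code,
+; letting a debugger place function breakpoints past the prologue.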
+define i32 @f(i32 %a) {
+entry:
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %a.addr}, metadata !11), !dbg !12
+ %0 = load i32* %a.addr, align 4, !dbg !12
+ %add = add nsw i32 %0, 1, !dbg !12
+ ret i32 %add, !dbg !12
+}
+
+declare void @llvm.dbg.declare(metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1}
+!1 = metadata !{metadata !"", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f", metadata !"f", metadata !"", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @f, null, null, metadata !2, i32 2}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null}
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{i32 786689, metadata !4, metadata !"a", metadata !5, i32 16777218, metadata !8, i32 0, i32 0}
+!12 = metadata !{i32 2, i32 0, metadata !4, null}
+
diff --git a/test/CodeGen/XCore/lit.local.cfg b/test/CodeGen/XCore/lit.local.cfg
index 3e84c1b..0b947bb 100644
--- a/test/CodeGen/XCore/lit.local.cfg
+++ b/test/CodeGen/XCore/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'XCore' in targets:
+if not 'XCore' in config.root.targets:
config.unsupported = True