Diffstat (limited to 'test/CodeGen/Mips')
-rw-r--r--  test/CodeGen/Mips/2008-07-16-SignExtInReg.ll | 1
-rw-r--r--  test/CodeGen/Mips/2008-08-01-AsmInline.ll | 18
-rw-r--r--  test/CodeGen/Mips/2013-11-18-fp64-const0.ll | 31
-rw-r--r--  test/CodeGen/Mips/beqzc.ll | 20
-rw-r--r--  test/CodeGen/Mips/beqzc1.ll | 24
-rw-r--r--  test/CodeGen/Mips/blockaddr.ll | 10
-rw-r--r--  test/CodeGen/Mips/brdelayslot.ll | 11
-rw-r--r--  test/CodeGen/Mips/brsize3.ll | 33
-rw-r--r--  test/CodeGen/Mips/brsize3a.ll | 26
-rw-r--r--  test/CodeGen/Mips/bswap.ll | 3
-rw-r--r--  test/CodeGen/Mips/buildpairextractelementf64.ll | 23
-rw-r--r--  test/CodeGen/Mips/cmplarge.ll | 12
-rw-r--r--  test/CodeGen/Mips/const1.ll | 35
-rw-r--r--  test/CodeGen/Mips/const4a.ll | 180
-rw-r--r--  test/CodeGen/Mips/const6.ll | 164
-rw-r--r--  test/CodeGen/Mips/const6a.ll | 29
-rw-r--r--  test/CodeGen/Mips/ctlz.ll | 27
-rw-r--r--  test/CodeGen/Mips/disable-tail-merge.ll | 33
-rw-r--r--  test/CodeGen/Mips/divrem.ll | 14
-rw-r--r--  test/CodeGen/Mips/extins.ll | 9
-rw-r--r--  test/CodeGen/Mips/f16abs.ll | 37
-rw-r--r--  test/CodeGen/Mips/fixdfsf.ll | 18
-rw-r--r--  test/CodeGen/Mips/fp16instrinsmc.ll | 27
-rw-r--r--  test/CodeGen/Mips/fp16mix.ll | 92
-rw-r--r--  test/CodeGen/Mips/fpneeded.ll | 2
-rw-r--r--  test/CodeGen/Mips/fpnotneeded.ll | 2
-rw-r--r--  test/CodeGen/Mips/fptr2.ll | 20
-rw-r--r--  test/CodeGen/Mips/helloworld.ll | 22
-rw-r--r--  test/CodeGen/Mips/hf16call32.ll | 4
-rw-r--r--  test/CodeGen/Mips/hf16call32_body.ll | 26
-rw-r--r--  test/CodeGen/Mips/hf1_body.ll | 21
-rw-r--r--  test/CodeGen/Mips/hfptrcall.ll | 4
-rw-r--r--  test/CodeGen/Mips/i32k.ll | 19
-rw-r--r--  test/CodeGen/Mips/i64arg.ll | 20
-rw-r--r--  test/CodeGen/Mips/largeimmprinting.ll | 4
-rw-r--r--  test/CodeGen/Mips/lazy-binding.ll | 41
-rw-r--r--  test/CodeGen/Mips/lit.local.cfg | 2
-rw-r--r--  test/CodeGen/Mips/longbranch.ll | 8
-rw-r--r--  test/CodeGen/Mips/mips16_32_1.ll | 2
-rw-r--r--  test/CodeGen/Mips/mips16_32_10.ll | 6
-rw-r--r--  test/CodeGen/Mips/mips16_32_3.ll | 6
-rw-r--r--  test/CodeGen/Mips/mips16_32_4.ll | 6
-rw-r--r--  test/CodeGen/Mips/mips16_32_5.ll | 6
-rw-r--r--  test/CodeGen/Mips/mips16_32_6.ll | 6
-rw-r--r--  test/CodeGen/Mips/mips16_32_7.ll | 6
-rw-r--r--  test/CodeGen/Mips/mips16_32_8.ll | 8
-rw-r--r--  test/CodeGen/Mips/mips16_32_9.ll | 6
-rw-r--r--  test/CodeGen/Mips/mips64instrs.ll | 16
-rw-r--r--  test/CodeGen/Mips/mno-ldc1-sdc1.ll | 78
-rw-r--r--  test/CodeGen/Mips/msa/2r.ll | 257
-rw-r--r--  test/CodeGen/Mips/msa/2r_vector_scalar.ll | 87
-rw-r--r--  test/CodeGen/Mips/msa/2rf.ll | 323
-rw-r--r--  test/CodeGen/Mips/msa/2rf_exup.ll | 82
-rw-r--r--  test/CodeGen/Mips/msa/2rf_float_int.ll | 90
-rw-r--r--  test/CodeGen/Mips/msa/2rf_fq.ll | 82
-rw-r--r--  test/CodeGen/Mips/msa/2rf_int_float.ll | 217
-rw-r--r--  test/CodeGen/Mips/msa/2rf_tq.ll | 50
-rw-r--r--  test/CodeGen/Mips/msa/3r-a.ll | 1191
-rw-r--r--  test/CodeGen/Mips/msa/3r-b.ll | 494
-rw-r--r--  test/CodeGen/Mips/msa/3r-c.ll | 446
-rw-r--r--  test/CodeGen/Mips/msa/3r-d.ll | 478
-rw-r--r--  test/CodeGen/Mips/msa/3r-i.ll | 358
-rw-r--r--  test/CodeGen/Mips/msa/3r-m.ll | 862
-rw-r--r--  test/CodeGen/Mips/msa/3r-p.ll | 182
-rw-r--r--  test/CodeGen/Mips/msa/3r-s.ll | 1353
-rw-r--r--  test/CodeGen/Mips/msa/3r-v.ll | 105
-rw-r--r--  test/CodeGen/Mips/msa/3r_4r.ll | 206
-rw-r--r--  test/CodeGen/Mips/msa/3r_4r_widen.ll | 307
-rw-r--r--  test/CodeGen/Mips/msa/3r_splat.ll | 94
-rw-r--r--  test/CodeGen/Mips/msa/3rf.ll | 485
-rw-r--r--  test/CodeGen/Mips/msa/3rf_4rf.ll | 106
-rw-r--r--  test/CodeGen/Mips/msa/3rf_4rf_q.ll | 206
-rw-r--r--  test/CodeGen/Mips/msa/3rf_exdo.ll | 50
-rw-r--r--  test/CodeGen/Mips/msa/3rf_float_int.ll | 50
-rw-r--r--  test/CodeGen/Mips/msa/3rf_int_float.ll | 974
-rw-r--r--  test/CodeGen/Mips/msa/3rf_q.ll | 94
-rw-r--r--  test/CodeGen/Mips/msa/arithmetic.ll | 726
-rw-r--r--  test/CodeGen/Mips/msa/arithmetic_float.ll | 456
-rw-r--r--  test/CodeGen/Mips/msa/basic_operations.ll | 481
-rw-r--r--  test/CodeGen/Mips/msa/basic_operations_float.ll | 207
-rw-r--r--  test/CodeGen/Mips/msa/bit.ll | 537
-rw-r--r--  test/CodeGen/Mips/msa/bitcast.ll | 1210
-rw-r--r--  test/CodeGen/Mips/msa/bitwise.ll | 1639
-rw-r--r--  test/CodeGen/Mips/msa/compare.ll | 2079
-rw-r--r--  test/CodeGen/Mips/msa/compare_float.ll | 663
-rw-r--r--  test/CodeGen/Mips/msa/elm_copy.ll | 162
-rw-r--r--  test/CodeGen/Mips/msa/elm_cxcmsa.ll | 168
-rw-r--r--  test/CodeGen/Mips/msa/elm_insv.ll | 192
-rw-r--r--  test/CodeGen/Mips/msa/elm_move.ll | 25
-rw-r--r--  test/CodeGen/Mips/msa/elm_shift_slide.ll | 158
-rw-r--r--  test/CodeGen/Mips/msa/endian.ll | 107
-rw-r--r--  test/CodeGen/Mips/msa/frameindex.ll | 85
-rw-r--r--  test/CodeGen/Mips/msa/i10.ll | 89
-rw-r--r--  test/CodeGen/Mips/msa/i5-a.ll | 82
-rw-r--r--  test/CodeGen/Mips/msa/i5-b.ll | 439
-rw-r--r--  test/CodeGen/Mips/msa/i5-c.ll | 386
-rw-r--r--  test/CodeGen/Mips/msa/i5-m.ll | 310
-rw-r--r--  test/CodeGen/Mips/msa/i5-s.ll | 82
-rw-r--r--  test/CodeGen/Mips/msa/i5_ld_st.ll | 150
-rw-r--r--  test/CodeGen/Mips/msa/i8.ll | 211
-rw-r--r--  test/CodeGen/Mips/msa/inline-asm.ll | 34
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll | 134
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll | 138
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll | 31
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll | 27
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll | 141
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll | 149
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll | 143
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll | 152
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll | 33
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s525530439.ll | 139
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s997348632.ll | 143
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll | 23
-rw-r--r--  test/CodeGen/Mips/msa/shift-dagcombine.ll | 70
-rw-r--r--  test/CodeGen/Mips/msa/shuffle.ll | 803
-rw-r--r--  test/CodeGen/Mips/msa/special.ll | 26
-rw-r--r--  test/CodeGen/Mips/msa/spill.ll | 601
-rw-r--r--  test/CodeGen/Mips/msa/vec.ll | 946
-rw-r--r--  test/CodeGen/Mips/msa/vecs10.ll | 47
-rw-r--r--  test/CodeGen/Mips/nomips16.ll | 38
-rw-r--r--  test/CodeGen/Mips/o32_cc.ll | 242
-rw-r--r--  test/CodeGen/Mips/o32_cc_byval.ll | 62
-rw-r--r--  test/CodeGen/Mips/powif64_16.ll | 26
-rw-r--r--  test/CodeGen/Mips/ra-allocatable.ll | 367
-rw-r--r--  test/CodeGen/Mips/rotate.ll | 5
-rw-r--r--  test/CodeGen/Mips/sel1c.ll | 21
-rw-r--r--  test/CodeGen/Mips/sel2c.ll | 21
-rw-r--r--  test/CodeGen/Mips/simplebr.ll | 37
-rw-r--r--  test/CodeGen/Mips/stack-alignment.ll | 13
-rw-r--r--  test/CodeGen/Mips/tailcall.ll | 13
-rw-r--r--  test/CodeGen/Mips/tnaked.ll | 4
131 files changed, 25280 insertions, 440 deletions
diff --git a/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll b/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
index 8479ad2..3381143 100644
--- a/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
+++ b/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float < %s | FileCheck %s
define signext i8 @A(i8 %e.0, i8 signext %sum) nounwind {
entry:
diff --git a/test/CodeGen/Mips/2008-08-01-AsmInline.ll b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
index dbde742..e274bc0 100644
--- a/test/CodeGen/Mips/2008-08-01-AsmInline.ll
+++ b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
@@ -51,3 +51,21 @@ entry:
ret void
}
+; Check that RA doesn't allocate registers in the clobber list.
+; CHECK-LABEL: foo4:
+; CHECK: #APP
+; CHECK-NOT: ulh $2
+; CHECK: #NO_APP
+; CHECK: #APP
+; CHECK-NOT: $f0
+; CHECK: #NO_APP
+
+define void @foo4() {
+entry:
+ %0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"()
+ store i32 %0, i32* @gi2, align 4
+ %1 = load float* @gf0, align 4
+ %2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1)
+ store double %2, double* @gd0, align 8
+ ret void
+}
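
The clobber strings are what this test exercises: "~{$2}" and "~{$f0}" tell the register allocator that $2 and $f0 are killed inside the asm, so the "=r" and "=f" results must land elsewhere. A minimal sketch of the same mechanics, assuming only standard LLVM inline-asm constraint syntax (the function name and immediate are illustrative, not part of the patch):

define i32 @clobber_sketch() {
entry:
  ; "=r" requests any GPR for the result; "~{$2}" marks $2 ($v0) as
  ; clobbered, so the allocator must assign %res a register other than $2.
  %res = call i32 asm sideeffect "addiu $0, $$zero, 1", "=r,~{$2}"()
  ret i32 %res
}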
diff --git a/test/CodeGen/Mips/2013-11-18-fp64-const0.ll b/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
new file mode 100644
index 0000000..f8390d9
--- /dev/null
+++ b/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=mips -mattr=-fp64 < %s | FileCheck -check-prefix=CHECK-FP32 %s
+; RUN: llc -march=mips -mattr=+fp64 < %s | FileCheck -check-prefix=CHECK-FP64 %s
+
+; This test case is a simplified version of an llvm-stress generated test with
+; seed=3718491962.
+; It originally failed on MIPS32 with FP64 with the following error:
+; LLVM ERROR: ran out of registers during register allocation
+; This was caused by impossible register class restrictions caused by the use
+; of BuildPairF64 instead of BuildPairF64_64.
+
+define void @autogen_SD3718491962() {
+BB:
+ ; CHECK-FP32: mtc1 $zero, $f{{[0-3]*[02468]}}
+ ; CHECK-FP32: mtc1 $zero, $f{{[0-3]*[13579]}}
+
+ ; CHECK-FP64: mtc1 $zero, $f{{[0-9]+}}
+ ; CHECK-FP64-NOT: mtc1 $zero,
+ ; FIXME: A redundant mthc1 is currently emitted. Add a -NOT when it is
+ ; eliminated
+
+ %Cmp = fcmp ule double 0.000000e+00, undef
+ %Cmp11 = fcmp ueq double 0xFDBD965CF1BB7FDA, undef
+ br label %CF88
+
+CF88: ; preds = %CF86
+ %Sl18 = select i1 %Cmp, i1 %Cmp11, i1 %Cmp
+ br i1 %Sl18, label %CF88, label %CF85
+
+CF85: ; preds = %CF88
+ ret void
+}
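
The FIXME aside, the comment above captures the mechanism: assembling a 64-bit FP value from two 32-bit halves goes through the BuildPairF64 pseudo, which is only legal for even/odd FPR pairs under FP32; with +fp64 it must be BuildPairF64_64 (mtc1 plus mthc1 into a single 64-bit register), otherwise the allocator is boxed into an unsatisfiable register class. A hedged sketch of IR that reaches the same node (name illustrative, not part of the patch):

define double @pair_sketch(i32 %lo, i32 %hi) {
entry:
  ; On MIPS32 the i64 is legalized into two i32 halves, so the bitcast to
  ; double becomes BuildPairF64 (FP32) or BuildPairF64_64 (FP64).
  %l = zext i32 %lo to i64
  %h = zext i32 %hi to i64
  %hs = shl i64 %h, 32
  %v = or i64 %l, %hs
  %d = bitcast i64 %v to double
  ret double %d
}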
diff --git a/test/CodeGen/Mips/beqzc.ll b/test/CodeGen/Mips/beqzc.ll
new file mode 100644
index 0000000..4a294c2
--- /dev/null
+++ b/test/CodeGen/Mips/beqzc.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=cond-b-short
+
+@i = global i32 0, align 4
+@j = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define i32 @main() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %cmp = icmp eq i32 %0, 0
+ %. = select i1 %cmp, i32 10, i32 55
+ store i32 %., i32* @j, align 4
+; cond-b-short: beqz ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+ ret i32 0
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
+
diff --git a/test/CodeGen/Mips/beqzc1.ll b/test/CodeGen/Mips/beqzc1.ll
new file mode 100644
index 0000000..8f929a8
--- /dev/null
+++ b/test/CodeGen/Mips/beqzc1.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=cond-b-short
+
+@i = global i32 0, align 4
+@j = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define i32 @main() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+; cond-b-short: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+if.then: ; preds = %entry
+ store i32 10, i32* @j, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i32 0
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
diff --git a/test/CodeGen/Mips/blockaddr.ll b/test/CodeGen/Mips/blockaddr.ll
index 7de7fa6..beab65f 100644
--- a/test/CodeGen/Mips/blockaddr.ll
+++ b/test/CodeGen/Mips/blockaddr.ll
@@ -4,6 +4,8 @@
; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-2
@reg = common global i8* null, align 4
@@ -36,6 +38,14 @@ entry:
; STATIC-N64: daddiu ${{[0-9]+}}, $[[R2]], %got_ofst($tmp[[T2]])
; STATIC-N64: ld $[[R3:[0-9]+]], %got_page($tmp[[T3:[0-9]+]])
; STATIC-N64: daddiu ${{[0-9]+}}, $[[R3]], %got_ofst($tmp[[T3]])
+; STATIC-MIPS16-1: .ent f
+; STATIC-MIPS16-2: .ent f
+; STATIC-MIPS16-1: li $[[R1_16:[0-9]+]], %hi($tmp[[TI_16:[0-9]+]])
+; STATIC-MIPS16-1: sll ${{[0-9]+}}, $[[R1_16]], 16
+; STATIC-MIPS16-2: li ${{[0-9]+}}, %lo($tmp{{[0-9]+}})
+; STATIC-MIPS16-1: jal dummy
+; STATIC-MIPS16-2: jal dummy
+
define void @f() nounwind {
entry:
%call = tail call i8* @dummy(i8* blockaddress(@f, %baz))
diff --git a/test/CodeGen/Mips/brdelayslot.ll b/test/CodeGen/Mips/brdelayslot.ll
index 869ecd9..68341c1 100644
--- a/test/CodeGen/Mips/brdelayslot.ll
+++ b/test/CodeGen/Mips/brdelayslot.ll
@@ -160,7 +160,14 @@ for.end: ; preds = %for.body, %entry
;
; SUCCBB-LABEL: succbbs_br1:
; SUCCBB: beqz ${{[0-9]+}}, $BB
-; SUCCBB-NEXT: lw $25, %call16(foo100)
+; SUCCBB-NEXT: lw ${{[0-9]+}}, %got(foo101)(${{[0-9]+}})
+
+define internal fastcc void @foo101() {
+entry:
+ tail call void @foo100()
+ tail call void @foo100()
+ ret void
+}
define void @succbbs_br1(i32 %a) {
entry:
@@ -168,7 +175,7 @@ entry:
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- tail call void @foo100() #1
+ tail call fastcc void @foo101()
br label %if.end
if.end: ; preds = %entry, %if.then
diff --git a/test/CodeGen/Mips/brsize3.ll b/test/CodeGen/Mips/brsize3.ll
new file mode 100644
index 0000000..7b1f440
--- /dev/null
+++ b/test/CodeGen/Mips/brsize3.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=b-no-short
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=b-long
+
+; ModuleID = 'brsize3.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+; Function Attrs: noreturn nounwind optsize
+define void @foo() #0 {
+entry:
+ br label %x
+
+x: ; preds = %x, %entry
+ tail call void asm sideeffect ".space 60000", ""() #1, !srcloc !1
+ br label %x
+; b-long: $BB0_1:
+; b-long: #APP
+; b-long: .space 60000
+; b-long: #NO_APP
+; b-long: b $BB0_1
+; b-no-short: $BB0_1:
+; b-no-short: #APP
+; b-no-short: .space 60000
+; b-no-short: #NO_APP
+; b-no-short-NOT: b $BB0_1 # 16 bit inst
+
+}
+
+attributes #0 = { noreturn nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind }
+
+!1 = metadata !{i32 45}
diff --git a/test/CodeGen/Mips/brsize3a.ll b/test/CodeGen/Mips/brsize3a.ll
new file mode 100644
index 0000000..6382fa2
--- /dev/null
+++ b/test/CodeGen/Mips/brsize3a.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=b-short
+
+; ModuleID = 'brsize3.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+; Function Attrs: noreturn nounwind optsize
+define void @foo() #0 {
+entry:
+ br label %x
+
+x: ; preds = %x, %entry
+ tail call void asm sideeffect ".space 200", ""() #1, !srcloc !1
+ br label %x
+; b-short: $BB0_1:
+; b-short: #APP
+; b-short: .space 200
+; b-short: #NO_APP
+; b-short: b $BB0_1 # 16 bit inst
+
+}
+
+attributes #0 = { noreturn nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind }
+
+!1 = metadata !{i32 45}
diff --git a/test/CodeGen/Mips/bswap.ll b/test/CodeGen/Mips/bswap.ll
index 0da2d2b..f17b91a 100644
--- a/test/CodeGen/Mips/bswap.ll
+++ b/test/CodeGen/Mips/bswap.ll
@@ -1,11 +1,13 @@
; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=MIPS32
; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=MIPS64
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float | FileCheck %s -check-prefix=mips16
define i32 @bswap32(i32 %x) nounwind readnone {
entry:
; MIPS32-LABEL: bswap32:
; MIPS32: wsbh $[[R0:[0-9]+]]
; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
+; mips16: .ent bswap32
%or.3 = call i32 @llvm.bswap.i32(i32 %x)
ret i32 %or.3
}
@@ -15,6 +17,7 @@ entry:
; MIPS64-LABEL: bswap64:
; MIPS64: dsbh $[[R0:[0-9]+]]
; MIPS64: dshd ${{[0-9]+}}, $[[R0]]
+; mips16: .ent bswap64
%or.7 = call i64 @llvm.bswap.i64(i64 %x)
ret i64 %or.7
}
diff --git a/test/CodeGen/Mips/buildpairextractelementf64.ll b/test/CodeGen/Mips/buildpairextractelementf64.ll
index 585bc25..490d427 100644
--- a/test/CodeGen/Mips/buildpairextractelementf64.ll
+++ b/test/CodeGen/Mips/buildpairextractelementf64.ll
@@ -1,20 +1,31 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s
-; RUN: llc < %s -march=mips | FileCheck %s
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=FP32
+; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=FP32
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64
+; RUN: llc -march=mips -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64
+
@a = external global i32
+; CHECK-LABEL: f:
+; FP32: mtc1
+; FP32: mtc1
+; FP64-DAG: mtc1
+; FP64-DAG: mthc1
+
define double @f(i32 %a1, double %d) nounwind {
entry:
-; CHECK: mtc1
-; CHECK: mtc1
store i32 %a1, i32* @a, align 4
%add = fadd double %d, 2.000000e+00
ret double %add
}
+; CHECK-LABEL: f3:
+; FP32: mfc1
+; FP32: mfc1
+; FP64-DAG: mfc1
+; FP64-DAG: mfhc1
+
define void @f3(double %d, i32 %a1) nounwind {
entry:
-; CHECK: mfc1
-; CHECK: mfc1
tail call void @f2(i32 %a1, double %d) nounwind
ret void
}
diff --git a/test/CodeGen/Mips/cmplarge.ll b/test/CodeGen/Mips/cmplarge.ll
index b082fa3..2a3d30a 100644
--- a/test/CodeGen/Mips/cmplarge.ll
+++ b/test/CodeGen/Mips/cmplarge.ll
@@ -10,7 +10,7 @@ target triple = "mipsel--linux-gnu"
define void @getSubImagesLuma(%struct.StorablePicture* nocapture %s) #0 {
entry:
%size_y = getelementptr inbounds %struct.StorablePicture* %s, i32 0, i32 1
- %0 = load i32* %size_y, align 4, !tbaa !0
+ %0 = load i32* %size_y, align 4
%sub = add nsw i32 %0, -1
%add5 = add nsw i32 %0, 20
%cmp6 = icmp sgt i32 %add5, -20
@@ -20,7 +20,7 @@ for.body: ; preds = %entry, %for.body
%j.07 = phi i32 [ %inc, %for.body ], [ -20, %entry ]
%call = tail call i32 bitcast (i32 (...)* @iClip3 to i32 (i32, i32, i32)*)(i32 0, i32 %sub, i32 %j.07) #2
%inc = add nsw i32 %j.07, 1
- %1 = load i32* %size_y, align 4, !tbaa !0
+ %1 = load i32* %size_y, align 4
%add = add nsw i32 %1, 20
%cmp = icmp slt i32 %inc, %add
br i1 %cmp, label %for.body, label %for.end
@@ -33,10 +33,6 @@ for.end: ; preds = %for.body, %entry
; cmp16: .end getSubImagesLuma
declare i32 @iClip3(...) #1
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind }
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Mips/const1.ll b/test/CodeGen/Mips/const1.ll
new file mode 100644
index 0000000..cb2baca
--- /dev/null
+++ b/test/CodeGen/Mips/const1.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips16-constant-islands < %s | FileCheck %s
+
+; ModuleID = 'const1.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mipsel-unknown-linux"
+
+@i = common global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+@l = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @t() #0 {
+entry:
+ store i32 -559023410, i32* @i, align 4
+ store i32 -559023410, i32* @j, align 4
+ store i32 -87105875, i32* @k, align 4
+ store i32 262991277, i32* @l, align 4
+ ret void
+; CHECK: lw ${{[0-9]+}}, $CPI0_0
+; CHECK: lw ${{[0-9]+}}, $CPI0_1
+; CHECK: lw ${{[0-9]+}}, $CPI0_2
+; CHECK: $CPI0_0:
+; CHECK: .4byte 3735943886
+; CHECK: $CPI0_1:
+; CHECK: .4byte 4207861421
+; CHECK: $CPI0_2:
+; CHECK: .4byte 262991277
+}
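
For reference, the .4byte payloads are the unsigned two's-complement images of the stored constants: 2^32 - 559023410 = 4294967296 - 559023410 = 3735943886, and 2^32 - 87105875 = 4207861421, while the non-negative 262991277 is emitted unchanged. Only three pool entries appear for the four stores because the duplicated -559023410 is shared.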
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b754974ec32ab712ea7d8b52cd8037b24e7d6ed3) (gitosis@dmz-portal.mips.com:llvm.git 8e211187b501bc73edb938fde0019c9a20bcffd5)"}
diff --git a/test/CodeGen/Mips/const4a.ll b/test/CodeGen/Mips/const4a.ll
new file mode 100644
index 0000000..0332327
--- /dev/null
+++ b/test/CodeGen/Mips/const4a.ll
@@ -0,0 +1,180 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands -mips-constant-islands-no-load-relaxation < %s | FileCheck %s -check-prefix=no-load-relax
+
+; ModuleID = 'const4.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@i = common global i32 0, align 4
+@b = common global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+@l = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @t() #0 {
+entry:
+ store i32 -559023410, i32* @i, align 4
+ %0 = load i32* @b, align 4
+; no-load-relax: lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %if.then, label %if.else
+; no-load-relax: beqz ${{[0-9]+}}, $BB0_3
+; no-load-relax: lw ${{[0-9]+}}, %call16(foo)(${{[0-9]+}})
+; no-load-relax: b $BB0_4
+; no-load-relax: .align 2
+; no-load-relax: $CPI0_0:
+; no-load-relax: .4byte 3735943886
+; no-load-relax: $BB0_3:
+; no-load-relax: lw ${{[0-9]+}}, %call16(goo)(${{[0-9]+}})
+if.then: ; preds = %entry
+ call void bitcast (void (...)* @foo to void ()*)()
+ br label %if.end
+
+if.else: ; preds = %entry
+ call void bitcast (void (...)* @goo to void ()*)()
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ ret void
+}
+
+declare void @foo(...) #1
+
+declare void @goo(...) #1
+
+declare void @hoo(...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b310439121c875937d78cc49cc969bc1197fc025) (gitosis@dmz-portal.mips.com:llvm.git 7fc0ca9656ebec8dad61f72f5a5ddfb232c070fd)"}
diff --git a/test/CodeGen/Mips/const6.ll b/test/CodeGen/Mips/const6.ll
new file mode 100644
index 0000000..20cdc09
--- /dev/null
+++ b/test/CodeGen/Mips/const6.ll
@@ -0,0 +1,164 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=load-relax
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands -mips-constant-islands-no-load-relaxation < %s | FileCheck %s -check-prefix=no-load-relax
+
+; ModuleID = 'const6.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@i = common global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+@l = common global i32 0, align 4
+@b = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @t() #0 {
+entry:
+ store i32 -559023410, i32* @i, align 4
+; load-relax: lw ${{[0-9]+}}, $CPI0_0
+; load-relax: jrc $ra
+; load-relax: .align 2
+; load-relax: $CPI0_0:
+; load-relax: .4byte 3735943886
+; load-relax: .end t
+
+; no-load-relax: lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst
+; no-load-relax: jalrc ${{[0-9]+}}
+; no-load-relax: b $BB0_2
+; no-load-relax: .align 2
+; no-load-relax: $CPI0_0:
+; no-load-relax: .4byte 3735943886
+; no-load-relax: $BB0_2:
+
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ ret void
+}
+
+declare void @hoo(...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b310439121c875937d78cc49cc969bc1197fc025) (gitosis@dmz-portal.mips.com:llvm.git 7fc0ca9656ebec8dad61f72f5a5ddfb232c070fd)"}
+
+
diff --git a/test/CodeGen/Mips/const6a.ll b/test/CodeGen/Mips/const6a.ll
new file mode 100644
index 0000000..8b402ac
--- /dev/null
+++ b/test/CodeGen/Mips/const6a.ll
@@ -0,0 +1,29 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=load-relax1
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=load-relax
+
+; ModuleID = 'const6a.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@i = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @t() #0 {
+entry:
+ store i32 -559023410, i32* @i, align 4
+; load-relax-NOT: lw ${{[0-9]+}}, $CPI0_0 # 16 bit inst
+; load-relax1: lw ${{[0-9]+}}, $CPI0_0
+; load-relax: jrc $ra
+; load-relax: .align 2
+; load-relax: $CPI0_0:
+; load-relax: .4byte 3735943886
+; load-relax: .end t
+ call void asm sideeffect ".space 40000", ""() #1, !srcloc !1
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind }
+
+!1 = metadata !{i32 121}
diff --git a/test/CodeGen/Mips/ctlz.ll b/test/CodeGen/Mips/ctlz.ll
new file mode 100644
index 0000000..2ddb727
--- /dev/null
+++ b/test/CodeGen/Mips/ctlz.ll
@@ -0,0 +1,27 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static
+
+@x = global i32 28912, align 4
+@y = common global i32 0, align 4
+
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* @x, align 4
+ %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 true)
+ store i32 %1, i32* @y, align 4
+ ret i32 0
+}
+
+; static: .end main
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) #1
+
+
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind readnone }
+
diff --git a/test/CodeGen/Mips/disable-tail-merge.ll b/test/CodeGen/Mips/disable-tail-merge.ll
new file mode 100644
index 0000000..b4c093a
--- /dev/null
+++ b/test/CodeGen/Mips/disable-tail-merge.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+
+@g0 = common global i32 0, align 4
+@g1 = common global i32 0, align 4
+
+; CHECK: addiu ${{[0-9]+}}, ${{[0-9]+}}, 23
+; CHECK: addiu ${{[0-9]+}}, ${{[0-9]+}}, 23
+
+define i32 @test1(i32 %a) {
+entry:
+ %tobool = icmp eq i32 %a, 0
+ %0 = load i32* @g0, align 4
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then:
+ %add = add nsw i32 %0, 1
+ store i32 %add, i32* @g0, align 4
+ %1 = load i32* @g1, align 4
+ %add1 = add nsw i32 %1, 23
+ br label %if.end
+
+if.else:
+ %add2 = add nsw i32 %0, 11
+ store i32 %add2, i32* @g0, align 4
+ %2 = load i32* @g1, align 4
+ %add3 = add nsw i32 %2, 23
+ br label %if.end
+
+if.end:
+ %storemerge = phi i32 [ %add3, %if.else ], [ %add1, %if.then ]
+ store i32 %storemerge, i32* @g1, align 4
+ ret i32 %storemerge
+}
diff --git a/test/CodeGen/Mips/divrem.ll b/test/CodeGen/Mips/divrem.ll
index a983c46..b631c3b 100644
--- a/test/CodeGen/Mips/divrem.ll
+++ b/test/CodeGen/Mips/divrem.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=TRAP
+; RUN: llc -march=mips -verify-machineinstrs < %s |\
+; RUN: FileCheck %s -check-prefix=TRAP
; RUN: llc -march=mips -mno-check-zero-division < %s |\
; RUN: FileCheck %s -check-prefix=NOCHECK
@@ -11,6 +12,9 @@
; NOCHECK-NOT: teq
; NOCHECK: .end sdiv1
+@g0 = common global i32 0, align 4
+@g1 = common global i32 0, align 4
+
define i32 @sdiv1(i32 %a0, i32 %a1) nounwind readnone {
entry:
%div = sdiv i32 %a0, %a1
@@ -67,3 +71,11 @@ entry:
%div = udiv i32 %a0, %a1
ret i32 %div
}
+
+define i32 @killFlags() {
+entry:
+ %0 = load i32* @g0, align 4
+ %1 = load i32* @g1, align 4
+ %div = sdiv i32 %0, %1
+ ret i32 %div
+}
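
A note on what these tests lower to: one MIPS div writes the quotient to LO and the remainder to HI, so an sdiv/srem pair over the same operands can share a single div (read back with mflo and mfhi), and with zero-division checks enabled a single teq trap guards it. A hedged sketch (function name illustrative, not part of the patch):

define i32 @divrem_sketch(i32 %a, i32 %b) {
entry:
  ; sdiv and srem of the same operands should share one div instruction;
  ; mflo yields %q and mfhi yields %r.
  %q = sdiv i32 %a, %b
  %r = srem i32 %a, %b
  %s = add nsw i32 %q, %r
  ret i32 %s
}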
diff --git a/test/CodeGen/Mips/extins.ll b/test/CodeGen/Mips/extins.ll
index a164f70..efaeeea 100644
--- a/test/CodeGen/Mips/extins.ll
+++ b/test/CodeGen/Mips/extins.ll
@@ -1,8 +1,10 @@
-; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=32R2
+; RUN: llc < %s -march=mips -mcpu=mips16 | FileCheck %s -check-prefix=16
define i32 @ext0_5_9(i32 %s, i32 %pos, i32 %sz) nounwind readnone {
entry:
-; CHECK: ext ${{[0-9]+}}, $4, 5, 9
+; 32R2: ext ${{[0-9]+}}, $4, 5, 9
+; 16-NOT: ext ${{[0-9]+}}
%shr = lshr i32 %s, 5
%and = and i32 %shr, 511
ret i32 %and
@@ -10,7 +12,8 @@ entry:
define void @ins2_5_9(i32 %s, i32* nocapture %d) nounwind {
entry:
-; CHECK: ins ${{[0-9]+}}, $4, 5, 9
+; 32R2: ins ${{[0-9]+}}, $4, 5, 9
+; 16-NOT: ins ${{[0-9]+}}
%and = shl i32 %s, 5
%shl = and i32 %and, 16352
%tmp3 = load i32* %d, align 4
diff --git a/test/CodeGen/Mips/f16abs.ll b/test/CodeGen/Mips/f16abs.ll
new file mode 100644
index 0000000..928914f
--- /dev/null
+++ b/test/CodeGen/Mips/f16abs.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static
+
+@y = global double -1.450000e+00, align 8
+@x = common global double 0.000000e+00, align 8
+
+@y1 = common global float 0.000000e+00, align 4
+@x1 = common global float 0.000000e+00, align 4
+
+
+
+; Function Attrs: nounwind optsize
+define i32 @main() #0 {
+entry:
+ %0 = load double* @y, align 8
+ %call = tail call double @fabs(double %0) #2
+ store double %call, double* @x, align 8
+; static-NOT: .ent __call_stub_fp_fabs
+; static-NOT: jal fabs
+ %1 = load float* @y1, align 4
+ %call2 = tail call float @fabsf(float %1) #2
+ store float %call2, float* @x1, align 4
+; static-NOT: .ent __call_stub_fp_fabsf
+; static-NOT: jal fabsf
+ ret i32 0
+}
+
+; Function Attrs: nounwind optsize readnone
+declare double @fabs(double) #1
+
+declare float @fabsf(float) #1
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind optsize readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #2 = { nounwind optsize readnone }
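
The -NOT checks assert that fabs and fabsf are expanded inline (a sign-bit clear) rather than routed through a libcall or a mips16 hard-float stub. A hedged equivalent that requests the same lowering directly via the intrinsic (sketch, not part of the patch):

declare double @llvm.fabs.f64(double)

define double @fabs_sketch(double %d) {
entry:
  ; Expands to clearing the sign bit (or abs.d), never a call to fabs.
  %a = call double @llvm.fabs.f64(double %d)
  ret double %a
}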
+
+
+
diff --git a/test/CodeGen/Mips/fixdfsf.ll b/test/CodeGen/Mips/fixdfsf.ll
new file mode 100644
index 0000000..b08eefd
--- /dev/null
+++ b/test/CodeGen/Mips/fixdfsf.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic2
+
+@x = common global double 0.000000e+00, align 8
+@y = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define void @foo() {
+entry:
+ %0 = load double* @x, align 8
+ %conv = fptoui double %0 to i32
+ store i32 %conv, i32* @y, align 4
+; pic1: lw ${{[0-9]+}}, %call16(__fixunsdfsi)(${{[0-9]+}})
+; pic2: lw ${{[0-9]+}}, %got(__mips16_call_stub_2)(${{[0-9]+}})
+ ret void
+}
+
+
diff --git a/test/CodeGen/Mips/fp16instrinsmc.ll b/test/CodeGen/Mips/fp16instrinsmc.ll
index 3c01d56..bb43d27 100644
--- a/test/CodeGen/Mips/fp16instrinsmc.ll
+++ b/test/CodeGen/Mips/fp16instrinsmc.ll
@@ -1,4 +1,5 @@
; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=1010111 -mips-os16 < %s | FileCheck %s -check-prefix=fmask
@x = global float 1.500000e+00, align 4
@xn = global float -1.900000e+01, align 4
@@ -13,6 +14,14 @@
; Function Attrs: nounwind
define void @foo1() #0 {
+; fmask: .ent foo1
+; fmask: .set noreorder
+; fmask: .set nomacro
+; fmask: .set noat
+; fmask: .set at
+; fmask: .set macro
+; fmask: .set reorder
+; fmask: .end foo1
entry:
%0 = load float* @x, align 4
%1 = load float* @one, align 4
@@ -26,6 +35,9 @@ declare float @copysignf(float, float) #1
; Function Attrs: nounwind
define void @foo2() #0 {
+; fmask: .ent foo2
+; fmask: save {{.*}}
+; fmask: .end foo2
entry:
%0 = load float* @x, align 4
%1 = load float* @negone, align 4
@@ -37,6 +49,14 @@ entry:
; Function Attrs: nounwind
define void @foo3() #0 {
entry:
+; fmask: .ent foo3
+; fmask: .set noreorder
+; fmask: .set nomacro
+; fmask: .set noat
+; fmask: .set at
+; fmask: .set macro
+; fmask: .set reorder
+; fmask: .end foo3
%0 = load double* @xd, align 8
%1 = load float* @oned, align 4
%conv = fpext float %1 to double
@@ -51,6 +71,9 @@ declare double @copysign(double, double) #1
; Function Attrs: nounwind
define void @foo4() #0 {
entry:
+; fmask: .ent foo4
+; fmask: save {{.*}}
+; fmask: .end foo4
%0 = load double* @xd, align 8
%1 = load double* @negoned, align 8
%call = call double @copysign(double %0, double %1) #2
@@ -362,7 +385,7 @@ entry:
; Function Attrs: nounwind
declare double @exp2(double) #0
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
-attributes #1 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }
diff --git a/test/CodeGen/Mips/fp16mix.ll b/test/CodeGen/Mips/fp16mix.ll
new file mode 100644
index 0000000..8d85099
--- /dev/null
+++ b/test/CodeGen/Mips/fp16mix.ll
@@ -0,0 +1,92 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=10 -mips-os16 < %s | FileCheck %s -check-prefix=fmask1
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=01 -mips-os16 < %s | FileCheck %s -check-prefix=fmask2
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=10. -mips-os16 < %s | FileCheck %s -check-prefix=fmask1nr
+
+; Function Attrs: nounwind optsize readnone
+define void @foo1() {
+entry:
+ ret void
+; fmask1: .ent foo1
+; fmask1: .set noreorder
+; fmask1: .set nomacro
+; fmask1: .set noat
+; fmask1: .set at
+; fmask1: .set macro
+; fmask1: .set reorder
+; fmask1: .end foo1
+; fmask2: .ent foo1
+; fmask2: save {{.*}}
+; fmask2: .end foo1
+; fmask1nr: .ent foo1
+; fmask1nr: .set noreorder
+; fmask1nr: .set nomacro
+; fmask1nr: .set noat
+; fmask1nr: .set at
+; fmask1nr: .set macro
+; fmask1nr: .set reorder
+; fmask1nr: .end foo1
+}
+
+; Function Attrs: nounwind optsize readnone
+define void @foo2() {
+entry:
+ ret void
+; fmask2: .ent foo2
+; fmask2: .set noreorder
+; fmask2: .set nomacro
+; fmask2: .set noat
+; fmask2: .set at
+; fmask2: .set macro
+; fmask2: .set reorder
+; fmask2: .end foo2
+; fmask1: .ent foo2
+; fmask1: save {{.*}}
+; fmask1: .end foo2
+; fmask1nr: .ent foo2
+; fmask1nr: save {{.*}}
+; fmask1nr: .end foo2
+}
+
+; Function Attrs: nounwind optsize readnone
+define void @foo3() {
+entry:
+ ret void
+; fmask1: .ent foo3
+; fmask1: .set noreorder
+; fmask1: .set nomacro
+; fmask1: .set noat
+; fmask1: .set at
+; fmask1: .set macro
+; fmask1: .set reorder
+; fmask1: .end foo3
+; fmask2: .ent foo3
+; fmask2: save {{.*}}
+; fmask2: .end foo3
+; fmask1nr: .ent foo3
+; fmask1nr: save {{.*}}
+; fmask1nr: .end foo3
+}
+
+; Function Attrs: nounwind optsize readnone
+define void @foo4() {
+entry:
+ ret void
+; fmask2: .ent foo4
+; fmask2: .set noreorder
+; fmask2: .set nomacro
+; fmask2: .set noat
+; fmask2: .set at
+; fmask2: .set macro
+; fmask2: .set reorder
+; fmask2: .end foo4
+; fmask1: .ent foo4
+; fmask1: save {{.*}}
+; fmask1: .end foo4
+; fmask1nr: .ent foo4
+; fmask1nr: save {{.*}}
+; fmask1nr: .end foo4
+}
+
+
diff --git a/test/CodeGen/Mips/fpneeded.ll b/test/CodeGen/Mips/fpneeded.ll
index 623883a..dcdebb9 100644
--- a/test/CodeGen/Mips/fpneeded.ll
+++ b/test/CodeGen/Mips/fpneeded.ll
@@ -131,7 +131,7 @@ entry:
; 32: .set reorder
; 32: .end foo3
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
define void @vv() #0 {
entry:
diff --git a/test/CodeGen/Mips/fpnotneeded.ll b/test/CodeGen/Mips/fpnotneeded.ll
index dc2ec10..b4fab64 100644
--- a/test/CodeGen/Mips/fpnotneeded.ll
+++ b/test/CodeGen/Mips/fpnotneeded.ll
@@ -57,7 +57,7 @@ entry:
; 32: restore {{.+}}
; 32: .end foo
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
define float @fv() #0 {
diff --git a/test/CodeGen/Mips/fptr2.ll b/test/CodeGen/Mips/fptr2.ll
new file mode 100644
index 0000000..77028db
--- /dev/null
+++ b/test/CodeGen/Mips/fptr2.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static16
+
+; Function Attrs: nounwind
+define double @my_mul(double %a, double %b) #0 {
+entry:
+ %a.addr = alloca double, align 8
+ %b.addr = alloca double, align 8
+ store double %a, double* %a.addr, align 8
+ store double %b, double* %b.addr, align 8
+ %0 = load double* %a.addr, align 8
+ %1 = load double* %b.addr, align 8
+ %mul = fmul double %0, %1
+ ret double %mul
+}
+
+; static16: .ent __fn_stub_my_mul
+; static16: .set reorder
+; static16-NEXT: #NO_APP
+; static16: .end __fn_stub_my_mul
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
diff --git a/test/CodeGen/Mips/helloworld.ll b/test/CodeGen/Mips/helloworld.ll
index 83c88ae..058a041 100644
--- a/test/CodeGen/Mips/helloworld.ll
+++ b/test/CodeGen/Mips/helloworld.ll
@@ -1,11 +1,11 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=C1
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=C2
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=PE
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s | FileCheck %s -check-prefix=ST1
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s | FileCheck %s -check-prefix=ST2
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=C1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=C2
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=PE
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s | FileCheck %s -check-prefix=ST1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s | FileCheck %s -check-prefix=ST2
;
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=SR
-; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=SR32
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=SR
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=SR32
@.str = private unnamed_addr constant [13 x i8] c"hello world\0A\00", align 1
@@ -26,9 +26,11 @@ entry:
; SR32: .set nomacro
; SR32: .set noat
; SR: save $ra, $s0, $s1, $s2, [[FS:[0-9]+]]
-; PE: li $[[T1:[0-9]+]], %hi(_gp_disp)
-; PE: addiu $[[T2:[0-9]+]], $pc, %lo(_gp_disp)
-; PE: sll $[[T3:[0-9]+]], $[[T1]], 16
+; PE: .ent main
+; PE: .align 2
+; PE-NEXT: li $[[T1:[0-9]+]], %hi(_gp_disp)
+; PE-NEXT: addiu $[[T2:[0-9]+]], $pc, %lo(_gp_disp)
+; PE: sll $[[T3:[0-9]+]], $[[T1]], 16
; C1: lw ${{[0-9]+}}, %got($.str)(${{[0-9]+}})
; C2: lw ${{[0-9]+}}, %call16(printf)(${{[0-9]+}})
; C1: addiu ${{[0-9]+}}, %lo($.str)
diff --git a/test/CodeGen/Mips/hf16call32.ll b/test/CodeGen/Mips/hf16call32.ll
index 934cf06..461438e 100644
--- a/test/CodeGen/Mips/hf16call32.ll
+++ b/test/CodeGen/Mips/hf16call32.ll
@@ -1026,5 +1026,5 @@ declare { double, double } @dc_sf(float) #1
; stel: jr $18
; stel: .end __call_stub_fp_dc_sf
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/hf16call32_body.ll b/test/CodeGen/Mips/hf16call32_body.ll
index 793b771..34bae26 100644
--- a/test/CodeGen/Mips/hf16call32_body.ll
+++ b/test/CodeGen/Mips/hf16call32_body.ll
@@ -20,7 +20,7 @@ entry:
}
; stel: .section .mips16.fn.v_sf,"ax",@progbits
; stel: .ent __fn_stub_v_sf
-; stel: la $25, v_sf
+; stel: la $25,v_sf
; stel: mfc1 $4,$f12
; stel: jr $25
; stel: __fn_local_v_sf = v_sf
@@ -40,7 +40,7 @@ entry:
; stel: .section .mips16.fn.v_df,"ax",@progbits
; stel: .ent __fn_stub_v_df
-; stel: la $25, v_df
+; stel: la $25,v_df
; stel: mfc1 $4,$f12
; stel: mfc1 $5,$f13
; stel: jr $25
@@ -63,7 +63,7 @@ entry:
; stel: .section .mips16.fn.v_sf_sf,"ax",@progbits
; stel: .ent __fn_stub_v_sf_sf
-; stel: la $25, v_sf_sf
+; stel: la $25,v_sf_sf
; stel: mfc1 $4,$f12
; stel: mfc1 $5,$f14
; stel: jr $25
@@ -86,7 +86,7 @@ entry:
; stel: .section .mips16.fn.v_sf_df,"ax",@progbits
; stel: .ent __fn_stub_v_sf_df
-; stel: la $25, v_sf_df
+; stel: la $25,v_sf_df
; stel: mfc1 $4,$f12
; stel: mfc1 $6,$f14
; stel: mfc1 $7,$f15
@@ -110,7 +110,7 @@ entry:
; stel: .section .mips16.fn.v_df_sf,"ax",@progbits
; stel: .ent __fn_stub_v_df_sf
-; stel: la $25, v_df_sf
+; stel: la $25,v_df_sf
; stel: mfc1 $4,$f12
; stel: mfc1 $5,$f13
; stel: mfc1 $6,$f14
@@ -134,7 +134,7 @@ entry:
; stel: .section .mips16.fn.v_df_df,"ax",@progbits
; stel: .ent __fn_stub_v_df_df
-; stel: la $25, v_df_df
+; stel: la $25,v_df_df
; stel: mfc1 $4,$f12
; stel: mfc1 $5,$f13
; stel: mfc1 $6,$f14
@@ -164,7 +164,7 @@ entry:
; stel: .section .mips16.fn.sf_sf,"ax",@progbits
; stel: .ent __fn_stub_sf_sf
-; stel: la $25, sf_sf
+; stel: la $25,sf_sf
; stel: mfc1 $4,$f12
; stel: jr $25
; stel: __fn_local_sf_sf = sf_sf
@@ -184,7 +184,7 @@ entry:
; stel: .section .mips16.fn.sf_df,"ax",@progbits
; stel: .ent __fn_stub_sf_df
-; stel: la $25, sf_df
+; stel: la $25,sf_df
; stel: mfc1 $4,$f12
; stel: mfc1 $5,$f13
; stel: jr $25
@@ -208,7 +208,7 @@ entry:
; stel: .section .mips16.fn.sf_sf_sf,"ax",@progbits
; stel: .ent __fn_stub_sf_sf_sf
-; stel: la $25, sf_sf_sf
+; stel: la $25,sf_sf_sf
; stel: mfc1 $4,$f12
; stel: mfc1 $5,$f14
; stel: jr $25
@@ -232,7 +232,7 @@ entry:
; stel: .section .mips16.fn.sf_sf_df,"ax",@progbits
; stel: .ent __fn_stub_sf_sf_df
-; stel: la $25, sf_sf_df
+; stel: la $25,sf_sf_df
; stel: mfc1 $4,$f12
; stel: mfc1 $6,$f14
; stel: mfc1 $7,$f15
@@ -257,7 +257,7 @@ entry:
; stel: .section .mips16.fn.sf_df_sf,"ax",@progbits
; stel: .ent __fn_stub_sf_df_sf
-; stel: la $25, sf_df_sf
+; stel: la $25,sf_df_sf
; stel: mfc1 $4,$f12
; stel: mfc1 $5,$f13
; stel: mfc1 $6,$f14
@@ -282,7 +282,7 @@ entry:
; stel: .section .mips16.fn.sf_df_df,"ax",@progbits
; stel: .ent __fn_stub_sf_df_df
-; stel: la $25, sf_df_df
+; stel: la $25,sf_df_df
; stel: mfc1 $4,$f12
; stel: mfc1 $5,$f13
; stel: mfc1 $6,$f14
@@ -291,4 +291,4 @@ entry:
; stel: __fn_local_sf_df_df = sf_df_df
; stel: .end __fn_stub_sf_df_df
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/hf1_body.ll b/test/CodeGen/Mips/hf1_body.ll
new file mode 100644
index 0000000..b2cce92
--- /dev/null
+++ b/test/CodeGen/Mips/hf1_body.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=picfp16
+
+@x = external global float
+
+; Function Attrs: nounwind
+define void @v_sf(float %p) #0 {
+entry:
+ %p.addr = alloca float, align 4
+ store float %p, float* %p.addr, align 4
+ %0 = load float* %p.addr, align 4
+ store float %0, float* @x, align 4
+ ret void
+}
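+; As the checks below expect, the pic stub moves the float argument from
+; $f12 into $4 with mfc1 and jumps to the local mips16 body through $25.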
+; picfp16: .ent __fn_stub_v_sf
+; picfp16: .cpload $25
+; picfp16: .set reorder
+; picfp16: .reloc 0,R_MIPS_NONE,v_sf
+; picfp16: la $25,$__fn_local_v_sf
+; picfp16: mfc1 $4,$f12
+; picfp16: jr $25
+; picfp16: .end __fn_stub_v_sf
diff --git a/test/CodeGen/Mips/hfptrcall.ll b/test/CodeGen/Mips/hfptrcall.ll
index b1d36c0..25639da 100644
--- a/test/CodeGen/Mips/hfptrcall.ll
+++ b/test/CodeGen/Mips/hfptrcall.ll
@@ -118,8 +118,8 @@ entry:
declare i32 @printf(i8*, ...) #1
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="true" }
-attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="true" }
diff --git a/test/CodeGen/Mips/i32k.ll b/test/CodeGen/Mips/i32k.ll
index c6da8b1..f4dd1eb 100644
--- a/test/CodeGen/Mips/i32k.ll
+++ b/test/CodeGen/Mips/i32k.ll
@@ -1,16 +1,23 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16a
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16b
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
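+
+; Instead of materializing each half of the constant with li, mips16 now
+; loads the full 32-bit value from an inline constant pool: an lw from the
+; local label, a branch over the pool, and a .word holding the constant.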
@.str = private unnamed_addr constant [4 x i8] c"%i\0A\00", align 1
define i32 @main() nounwind {
entry:
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 1075344593) nounwind
-; 16a: li ${{[0-9]+}}, 29905
-; 16b: li ${{[0-9]+}}, 16408
+; 16: lw ${{[0-9]+}}, 1f
+; 16: b 2f
+; 16: .align 2
+; 16: 1: .word 1075344593
+; 16: 2:
+
%call1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 -1075344593) nounwind
-; 16a: li ${{[0-9]+}}, 49127
-; 16b: li ${{[0-9]+}}, 35631
+
+; 16: lw ${{[0-9]+}}, 1f
+; 16: b 2f
+; 16: .align 2
+; 16: 1: .word -1075344593
+; 16: 2:
ret i32 0
}
diff --git a/test/CodeGen/Mips/i64arg.ll b/test/CodeGen/Mips/i64arg.ll
index 0b16424..5b2d135 100644
--- a/test/CodeGen/Mips/i64arg.ll
+++ b/test/CodeGen/Mips/i64arg.ll
@@ -2,18 +2,18 @@
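+; The pieces of the i64 arguments can be scheduled in any order, so the
+; checks below use CHECK-DAG rather than relying on a fixed sequence.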
define void @f1(i64 %ll1, float %f, i64 %ll, i32 %i, float %f2) nounwind {
entry:
-; CHECK: move $[[R1:[0-9]+]], $5
-; CHECK: move $[[R0:[0-9]+]], $4
-; CHECK: ori $6, ${{[0-9]+}}, 3855
-; CHECK: ori $7, ${{[0-9]+}}, 22136
-; CHECK: lw $25, %call16(ff1)
+; CHECK-DAG: lw $[[R2:[0-9]+]], 80($sp)
+; CHECK-DAG: lw $[[R3:[0-9]+]], 84($sp)
+; CHECK-DAG: move $[[R1:[0-9]+]], $5
+; CHECK-DAG: move $[[R0:[0-9]+]], $4
+; CHECK-DAG: ori $6, ${{[0-9]+}}, 3855
+; CHECK-DAG: ori $7, ${{[0-9]+}}, 22136
+; CHECK-DAG: lw $25, %call16(ff1)
; CHECK: jalr
tail call void @ff1(i32 %i, i64 1085102592623924856) nounwind
-; CHECK: lw $25, %call16(ff2)
-; CHECK: lw $[[R2:[0-9]+]], 80($sp)
-; CHECK: lw $[[R3:[0-9]+]], 84($sp)
-; CHECK: move $4, $[[R2]]
-; CHECK: move $5, $[[R3]]
+; CHECK-DAG: lw $25, %call16(ff2)
+; CHECK-DAG: move $4, $[[R2]]
+; CHECK-DAG: move $5, $[[R3]]
; CHECK: jalr $25
tail call void @ff2(i64 %ll, double 3.000000e+00) nounwind
%sub = add nsw i32 %i, -1
diff --git a/test/CodeGen/Mips/largeimmprinting.ll b/test/CodeGen/Mips/largeimmprinting.ll
index 1e96346..09fee3d 100644
--- a/test/CodeGen/Mips/largeimmprinting.ll
+++ b/test/CodeGen/Mips/largeimmprinting.ll
@@ -18,11 +18,11 @@ entry:
; 64: dsll $[[R0]], $[[R0]], 48
; 64: daddiu $[[R0]], $[[R0]], -1
; 64: dsll $[[R0]], $[[R0]], 16
-; 64: daddiu $[[R0]], $[[R0]], -48
+; 64: daddiu $[[R0]], $[[R0]], -32
; 64: daddu $sp, $sp, $[[R0]]
; 64: lui $[[R1:[0-9]+]], 1
; 64: daddu $[[R1]], $sp, $[[R1]]
-; 64: sd $ra, 40($[[R1]])
+; 64: sd $ra, 24($[[R1]])
%agg.tmp = alloca %struct.S1, align 1
%tmp = getelementptr inbounds %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
diff --git a/test/CodeGen/Mips/lazy-binding.ll b/test/CodeGen/Mips/lazy-binding.ll
new file mode 100644
index 0000000..839155a
--- /dev/null
+++ b/test/CodeGen/Mips/lazy-binding.ll
@@ -0,0 +1,41 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+
+; CHECK-LABEL: foo6:
+; CHECK: %while.body
+; CHECK: lw $25, %call16(foo2)(${{[0-9]+}})
+; CHECK: jalr $25
+; CHECK: %while.end
+
+define void @foo6(i32 %n) {
+entry:
+ %tobool1 = icmp eq i32 %n, 0
+ br i1 %tobool1, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %n.addr.02 = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %dec = add nsw i32 %n.addr.02, -1
+ tail call void @foo2()
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+declare void @foo2()
+
+; CHECK-LABEL: foo1:
+; CHECK: lw $25, %call16(foo2)(${{[0-9]+}})
+; CHECK: jalr $25
+; CHECK: lw $25, %call16(foo2)(${{[0-9]+}})
+; CHECK: jalr $25
+; CHECK: lw $25, %call16(foo2)(${{[0-9]+}})
+; CHECK: jalr $25
+
+define void @foo1() {
+entry:
+ tail call void @foo2()
+ tail call void @foo2()
+ tail call void @foo2()
+ ret void
+}
diff --git a/test/CodeGen/Mips/lit.local.cfg b/test/CodeGen/Mips/lit.local.cfg
index e157c54..1fa54b4 100644
--- a/test/CodeGen/Mips/lit.local.cfg
+++ b/test/CodeGen/Mips/lit.local.cfg
@@ -1,5 +1,3 @@
-config.suffixes = ['.ll', '.c', '.cpp', '.test']
-
targets = set(config.root.targets_to_build.split())
if not 'Mips' in targets:
config.unsupported = True
diff --git a/test/CodeGen/Mips/longbranch.ll b/test/CodeGen/Mips/longbranch.ll
index 1a4f79c..af192d0 100644
--- a/test/CodeGen/Mips/longbranch.ll
+++ b/test/CodeGen/Mips/longbranch.ll
@@ -1,13 +1,17 @@
-; RUN: llc -march=mipsel -force-mips-long-branch < %s | FileCheck %s -check-prefix=O32
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 -force-mips-long-branch < %s | FileCheck %s -check-prefix=N64
+; RUN: llc -march=mipsel -force-mips-long-branch -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=O32
+; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 -force-mips-long-branch -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=N64
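+
+; With the delay-slot filler disabled, delay slots stay unfilled, so an
+; explicit nop now appears ahead of the long-branch sequence and the added
+; checks expect it.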
@g0 = external global i32
define void @foo1(i32 %s) nounwind {
entry:
+; O32: nop
+; O32: addiu $sp, $sp, -8
; O32: bal
; O32: lui $1, 0
; O32: addiu $1, $1, {{[0-9]+}}
+; N64: nop
+; N64: daddiu $sp, $sp, -16
; N64: lui $1, 0
; N64: daddiu $1, $1, 0
; N64: dsll $1, $1, 16
diff --git a/test/CodeGen/Mips/mips16_32_1.ll b/test/CodeGen/Mips/mips16_32_1.ll
index 6f4826e..e156641 100644
--- a/test/CodeGen/Mips/mips16_32_1.ll
+++ b/test/CodeGen/Mips/mips16_32_1.ll
@@ -11,4 +11,4 @@ entry:
; CHECK: save {{.+}}
; CHECK: restore {{.+}}
; CHECK: .end foo
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_10.ll b/test/CodeGen/Mips/mips16_32_10.ll
index 330dbfe..7c017b8 100644
--- a/test/CodeGen/Mips/mips16_32_10.ll
+++ b/test/CodeGen/Mips/mips16_32_10.ll
@@ -54,6 +54,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_3.ll b/test/CodeGen/Mips/mips16_32_3.ll
index 8874a88..dd94ec1 100644
--- a/test/CodeGen/Mips/mips16_32_3.ll
+++ b/test/CodeGen/Mips/mips16_32_3.ll
@@ -65,6 +65,6 @@ entry:
; 32: .set reorder
; 32: .end main
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_4.ll b/test/CodeGen/Mips/mips16_32_4.ll
index cdaed6c..5e49071 100644
--- a/test/CodeGen/Mips/mips16_32_4.ll
+++ b/test/CodeGen/Mips/mips16_32_4.ll
@@ -60,6 +60,6 @@ entry:
; 32: .end main
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_5.ll b/test/CodeGen/Mips/mips16_32_5.ll
index 45e0bf4..17900a2 100644
--- a/test/CodeGen/Mips/mips16_32_5.ll
+++ b/test/CodeGen/Mips/mips16_32_5.ll
@@ -75,6 +75,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_6.ll b/test/CodeGen/Mips/mips16_32_6.ll
index f4b8e7a..a77031a 100644
--- a/test/CodeGen/Mips/mips16_32_6.ll
+++ b/test/CodeGen/Mips/mips16_32_6.ll
@@ -81,6 +81,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_7.ll b/test/CodeGen/Mips/mips16_32_7.ll
index f8726ea..895b5d4 100644
--- a/test/CodeGen/Mips/mips16_32_7.ll
+++ b/test/CodeGen/Mips/mips16_32_7.ll
@@ -71,6 +71,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_8.ll b/test/CodeGen/Mips/mips16_32_8.ll
index e51f296..4152d68 100644
--- a/test/CodeGen/Mips/mips16_32_8.ll
+++ b/test/CodeGen/Mips/mips16_32_8.ll
@@ -68,7 +68,7 @@ entry:
; 32: .set reorder
; 32: .end main
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_9.ll b/test/CodeGen/Mips/mips16_32_9.ll
index f5ff368..c9b494f 100644
--- a/test/CodeGen/Mips/mips16_32_9.ll
+++ b/test/CodeGen/Mips/mips16_32_9.ll
@@ -46,6 +46,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips64instrs.ll b/test/CodeGen/Mips/mips64instrs.ll
index 7b06c2d..2894d69 100644
--- a/test/CodeGen/Mips/mips64instrs.ll
+++ b/test/CodeGen/Mips/mips64instrs.ll
@@ -1,4 +1,7 @@
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64 -verify-machineinstrs < %s | FileCheck %s
+
+@gll0 = common global i64 0, align 8
+@gll1 = common global i64 0, align 8
define i64 @f0(i64 %a0, i64 %a1) nounwind readnone {
entry:
@@ -90,17 +93,21 @@ entry:
; CHECK: ddiv $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
; CHECK: teq $[[R0]], $zero, 7
; CHECK: mflo
- %div = sdiv i64 %a, %b
+ %0 = load i64* @gll0, align 8
+ %1 = load i64* @gll1, align 8
+ %div = sdiv i64 %0, %1
ret i64 %div
}
-define i64 @f15(i64 %a, i64 %b) nounwind readnone {
+define i64 @f15() nounwind readnone {
entry:
; CHECK-LABEL: f15:
; CHECK: ddivu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
; CHECK: teq $[[R0]], $zero, 7
; CHECK: mflo
- %div = udiv i64 %a, %b
+ %0 = load i64* @gll0, align 8
+ %1 = load i64* @gll1, align 8
+ %div = udiv i64 %0, %1
ret i64 %div
}
@@ -148,4 +155,3 @@ entry:
%neg = xor i64 %or, -1
ret i64 %neg
}
-
diff --git a/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
index be9d0b6..f4854f8 100644
--- a/test/CodeGen/Mips/mno-ldc1-sdc1.ll
+++ b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
@@ -1,22 +1,31 @@
-; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 < %s | \
-; RUN: FileCheck %s -check-prefix=LE-PIC
+; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s -check-prefix=LE-PIC
; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 < %s | \
; RUN: FileCheck %s -check-prefix=LE-STATIC
; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 < %s | \
; RUN: FileCheck %s -check-prefix=BE-PIC
-; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=CHECK-LDC1-SDC1
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | \
+; RUN: FileCheck %s -check-prefix=CHECK-LDC1-SDC1
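+
+; Without ldc1/sdc1, each 64-bit FP access is expanded into two word-sized
+; lw/sw plus mtc1/mfc1 moves. The -DAG checks accept either ordering, and
+; the BE prefixes expect the two words swapped.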
@g0 = common global double 0.000000e+00, align 8
; LE-PIC-LABEL: test_ldc1:
-; LE-PIC: lwc1 $f0, 0(${{[0-9]+}})
-; LE-PIC: lwc1 $f1, 4(${{[0-9]+}})
+; LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; LE-PIC-DAG: mtc1 $[[R0]], $f0
+; LE-PIC-DAG: mtc1 $[[R1]], $f1
; LE-STATIC-LABEL: test_ldc1:
-; LE-STATIC: lwc1 $f0, %lo(g0)(${{[0-9]+}})
-; LE-STATIC: lwc1 $f1, %lo(g0+4)(${{[0-9]+}})
+; LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
+; LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
+; LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
+; LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
+; LE-STATIC-DAG: mtc1 $[[R1]], $f0
+; LE-STATIC-DAG: mtc1 $[[R3]], $f1
; BE-PIC-LABEL: test_ldc1:
-; BE-PIC: lwc1 $f1, 0(${{[0-9]+}})
-; BE-PIC: lwc1 $f0, 4(${{[0-9]+}})
+; BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; BE-PIC-DAG: mtc1 $[[R1]], $f0
+; BE-PIC-DAG: mtc1 $[[R0]], $f1
; CHECK-LDC1-SDC1-LABEL: test_ldc1:
; CHECK-LDC1-SDC1: ldc1 $f{{[0-9]+}}
@@ -27,14 +36,22 @@ entry:
}
; LE-PIC-LABEL: test_sdc1:
-; LE-PIC: swc1 $f12, 0(${{[0-9]+}})
-; LE-PIC: swc1 $f13, 4(${{[0-9]+}})
+; LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
; LE-STATIC-LABEL: test_sdc1:
-; LE-STATIC: swc1 $f12, %lo(g0)(${{[0-9]+}})
-; LE-STATIC: swc1 $f13, %lo(g0+4)(${{[0-9]+}})
+; LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; LE-STATIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
+; LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
+; LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
+; LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
; BE-PIC-LABEL: test_sdc1:
-; BE-PIC: swc1 $f13, 0(${{[0-9]+}})
-; BE-PIC: swc1 $f12, 4(${{[0-9]+}})
+; BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; BE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
+; BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
; CHECK-LDC1-SDC1-LABEL: test_sdc1:
; CHECK-LDC1-SDC1: sdc1 $f{{[0-9]+}}
@@ -43,3 +60,34 @@ entry:
store double %a, double* @g0, align 8
ret void
}
+
+; LE-PIC-LABEL: test_ldxc1:
+; LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; LE-PIC-DAG: mtc1 $[[R0]], $f0
+; LE-PIC-DAG: mtc1 $[[R1]], $f1
+; CHECK-LDC1-SDC1-LABEL: test_ldxc1:
+; CHECK-LDC1-SDC1: ldxc1 $f{{[0-9]+}}
+
+define double @test_ldxc1(double* nocapture readonly %a, i32 %i) {
+entry:
+ %arrayidx = getelementptr inbounds double* %a, i32 %i
+ %0 = load double* %arrayidx, align 8
+ ret double %0
+}
+
+; LE-PIC-LABEL: test_sdxc1:
+; LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+; CHECK-LDC1-SDC1-LABEL: test_sdxc1:
+; CHECK-LDC1-SDC1: sdxc1 $f{{[0-9]+}}
+
+define void @test_sdxc1(double %b, double* nocapture %a, i32 %i) {
+entry:
+ %arrayidx = getelementptr inbounds double* %a, i32 %i
+ store double %b, double* %arrayidx, align 8
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/2r.ll b/test/CodeGen/Mips/msa/2r.ll
new file mode 100644
index 0000000..da35ad8
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2r.ll
@@ -0,0 +1,257 @@
+; Test the MSA intrinsics that are encoded with the 2R instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
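+
+; CHECK-DAG is used throughout because the address loads for the argument
+; and result globals may be scheduled in either order.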
+
+@llvm_mips_nloc_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nloc_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nloc_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_nloc_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.nloc.b(<16 x i8> %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_nloc_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.nloc.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_nloc_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nloc_b_ARG1)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nloc.b [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nloc_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nloc_b_test
+;
+@llvm_mips_nloc_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_nloc_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_nloc_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_nloc_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.nloc.h(<8 x i16> %0)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_nloc_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.nloc.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_nloc_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nloc_h_ARG1)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nloc.h [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nloc_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nloc_h_test
+;
+@llvm_mips_nloc_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_nloc_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_nloc_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_nloc_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.nloc.w(<4 x i32> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_nloc_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.nloc.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_nloc_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nloc_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nloc.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nloc_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nloc_w_test
+;
+@llvm_mips_nloc_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_nloc_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_nloc_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_nloc_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.nloc.d(<2 x i64> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_nloc_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.nloc.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_nloc_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nloc_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nloc.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nloc_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nloc_d_test
+;
+@llvm_mips_nlzc_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nlzc_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nlzc_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_nlzc_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.nlzc.b(<16 x i8> %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_nlzc_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.nlzc.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_nlzc_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nlzc_b_ARG1)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nlzc.b [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nlzc_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nlzc_b_test
+;
+@llvm_mips_nlzc_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_nlzc_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_nlzc_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_nlzc_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.nlzc.h(<8 x i16> %0)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_nlzc_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.nlzc.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_nlzc_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nlzc_h_ARG1)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nlzc.h [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nlzc_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nlzc_h_test
+;
+@llvm_mips_nlzc_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_nlzc_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_nlzc_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_nlzc_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.nlzc.w(<4 x i32> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_nlzc_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.nlzc.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_nlzc_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nlzc_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nlzc.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nlzc_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nlzc_w_test
+;
+@llvm_mips_nlzc_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_nlzc_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_nlzc_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_nlzc_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.nlzc.d(<2 x i64> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_nlzc_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.nlzc.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_nlzc_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nlzc_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nlzc.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nlzc_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nlzc_d_test
+;
+@llvm_mips_pcnt_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pcnt_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pcnt_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_pcnt_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.pcnt.b(<16 x i8> %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_pcnt_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.pcnt.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_pcnt_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_pcnt_b_ARG1)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: pcnt.b [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_pcnt_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_pcnt_b_test
+;
+@llvm_mips_pcnt_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pcnt_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pcnt_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_pcnt_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.pcnt.h(<8 x i16> %0)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_pcnt_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.pcnt.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_pcnt_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_pcnt_h_ARG1)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: pcnt.h [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_pcnt_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_pcnt_h_test
+;
+@llvm_mips_pcnt_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pcnt_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pcnt_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_pcnt_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.pcnt.w(<4 x i32> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_pcnt_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.pcnt.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_pcnt_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_pcnt_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: pcnt.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_pcnt_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_pcnt_w_test
+;
+@llvm_mips_pcnt_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pcnt_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pcnt_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_pcnt_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.pcnt.d(<2 x i64> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_pcnt_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.pcnt.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_pcnt_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_pcnt_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: pcnt.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_pcnt_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_pcnt_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2r_vector_scalar.ll b/test/CodeGen/Mips/msa/2r_vector_scalar.ll
new file mode 100644
index 0000000..6f6e1b9
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2r_vector_scalar.ll
@@ -0,0 +1,87 @@
+; Test the MSA intrinsics that are encoded with the 2R instruction format and
+; convert scalars to vectors.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_fill_b_ARG1 = global i32 23, align 16
+@llvm_mips_fill_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_fill_b_test() nounwind {
+entry:
+ %0 = load i32* @llvm_mips_fill_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.fill.b(i32 %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_fill_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.fill.b(i32) nounwind
+
+; CHECK: llvm_mips_fill_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]],
+; CHECK-DAG: fill.b [[R2:\$w[0-9]+]], [[R1]]
+; CHECK-DAG: st.b [[R2]],
+; CHECK: .size llvm_mips_fill_b_test
+;
+@llvm_mips_fill_h_ARG1 = global i32 23, align 16
+@llvm_mips_fill_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_fill_h_test() nounwind {
+entry:
+ %0 = load i32* @llvm_mips_fill_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.fill.h(i32 %0)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_fill_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.fill.h(i32) nounwind
+
+; CHECK: llvm_mips_fill_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]],
+; CHECK-DAG: fill.h [[R2:\$w[0-9]+]], [[R1]]
+; CHECK-DAG: st.h [[R2]],
+; CHECK: .size llvm_mips_fill_h_test
+;
+@llvm_mips_fill_w_ARG1 = global i32 23, align 16
+@llvm_mips_fill_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fill_w_test() nounwind {
+entry:
+ %0 = load i32* @llvm_mips_fill_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.fill.w(i32 %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_fill_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fill.w(i32) nounwind
+
+; CHECK: llvm_mips_fill_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]],
+; CHECK-DAG: fill.w [[R2:\$w[0-9]+]], [[R1]]
+; CHECK-DAG: st.w [[R2]],
+; CHECK: .size llvm_mips_fill_w_test
+;
+@llvm_mips_fill_d_ARG1 = global i64 23, align 16
+@llvm_mips_fill_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fill_d_test() nounwind {
+entry:
+ %0 = load i64* @llvm_mips_fill_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.fill.d(i64 %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_fill_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fill.d(i64) nounwind
+
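+; There is no 64-bit GPR on this 32-bit target, so fill.d is expanded: the
+; i64 is loaded as two words, the destination is zeroed with ldi.b, and the
+; words are spliced into both lanes with insert.w.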
+; CHECK: llvm_mips_fill_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], 4(
+; CHECK-DAG: ldi.b [[R3:\$w[0-9]+]], 0
+; CHECK-DAG: insert.w [[R3]][0], [[R1]]
+; CHECK-DAG: insert.w [[R3]][1], [[R2]]
+; CHECK-DAG: insert.w [[R3]][2], [[R1]]
+; CHECK-DAG: insert.w [[R3]][3], [[R2]]
+; CHECK-DAG: st.w [[R3]],
+; CHECK: .size llvm_mips_fill_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf.ll b/test/CodeGen/Mips/msa/2rf.ll
new file mode 100644
index 0000000..b361ef5
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf.ll
@@ -0,0 +1,323 @@
+; Test the MSA intrinsics that are encoded with the 2RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
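+
+; Where a generic vector intrinsic exists (llvm.log2, llvm.rint, llvm.sqrt),
+; it is tested alongside the MSA-specific llvm.mips.* intrinsic; both are
+; expected to select the same MSA instruction.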
+
+@llvm_mips_flog2_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_flog2_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_flog2_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.flog2.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.flog2.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_flog2_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_flog2_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: flog2.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_flog2_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_flog2_w_test
+;
+@llvm_mips_flog2_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_flog2_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_flog2_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.flog2.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.flog2.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_flog2_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_flog2_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: flog2.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_flog2_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_flog2_d_test
+
+define void @flog2_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1
+ %1 = tail call <4 x float> @llvm.log2.v4f32(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.log2.v4f32(<4 x float> %val)
+
+; CHECK: flog2_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_flog2_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: flog2.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_flog2_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size flog2_w_test
+
+define void @flog2_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1
+ %1 = tail call <2 x double> @llvm.log2.v2f64(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.log2.v2f64(<2 x double> %val)
+
+; CHECK: flog2_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_flog2_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: flog2.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_flog2_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size flog2_d_test
+;
+@llvm_mips_frint_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_frint_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_frint_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_frint_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.frint.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.frint.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_frint_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frint_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frint.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frint_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frint_w_test
+;
+@llvm_mips_frint_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_frint_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_frint_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_frint_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.frint.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.frint.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_frint_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frint_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frint.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frint_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frint_d_test
+
+define void @frint_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_frint_w_ARG1
+ %1 = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.rint.v4f32(<4 x float>) nounwind
+
+; CHECK: frint_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frint_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frint.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frint_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size frint_w_test
+
+define void @frint_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_frint_d_ARG1
+ %1 = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.rint.v2f64(<2 x double>) nounwind
+
+; CHECK: frint_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frint_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frint.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frint_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size frint_d_test
+;
+@llvm_mips_frcp_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_frcp_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_frcp_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_frcp_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.frcp.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_frcp_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.frcp.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_frcp_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frcp_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frcp.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frcp_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frcp_w_test
+;
+@llvm_mips_frcp_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_frcp_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_frcp_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_frcp_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.frcp.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_frcp_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.frcp.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_frcp_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frcp_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frcp.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frcp_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frcp_d_test
+;
+@llvm_mips_frsqrt_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_frsqrt_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_frsqrt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_frsqrt_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.frsqrt.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_frsqrt_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.frsqrt.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_frsqrt_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frsqrt_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frsqrt.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frsqrt_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frsqrt_w_test
+;
+@llvm_mips_frsqrt_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_frsqrt_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_frsqrt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_frsqrt_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.frsqrt.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_frsqrt_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.frsqrt.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_frsqrt_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frsqrt_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frsqrt.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frsqrt_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frsqrt_d_test
+;
+@llvm_mips_fsqrt_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsqrt_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fsqrt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_fsqrt_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fsqrt_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fsqrt.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fsqrt_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_fsqrt_w_test
+;
+@llvm_mips_fsqrt_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsqrt_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fsqrt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_fsqrt_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fsqrt_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fsqrt.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fsqrt_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_fsqrt_d_test
+
+define void @fsqrt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1
+ %1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind
+
+; CHECK: fsqrt_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fsqrt_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fsqrt.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fsqrt_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size fsqrt_w_test
+
+define void @fsqrt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1
+ %1 = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.sqrt.v2f64(<2 x double>) nounwind
+
+; CHECK: fsqrt_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fsqrt_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fsqrt.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fsqrt_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size fsqrt_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_exup.ll b/test/CodeGen/Mips/msa/2rf_exup.ll
new file mode 100644
index 0000000..8d7cc36
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_exup.ll
@@ -0,0 +1,82 @@
+; Test the MSA floating-point conversion intrinsics (e.g. float->double) that
+; are encoded with the 2RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
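+; For reference, fexupl widens the left (most-significant) half of the source
+; vector and fexupr widens the right half. A generic-IR sketch of the
+; fexupl.w semantics follows; the element indices assume little-endian lane
+; numbering and the function is illustrative only -- it is not FileCheck
+; verified and is not claimed to select to fexupl.w.
+define <4 x float> @fexupl_w_sketch(<8 x half> %a) nounwind {
+entry:
+  %hi = shufflevector <8 x half> %a, <8 x half> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %wide = fpext <4 x half> %hi to <4 x float>
+  ret <4 x float> %wide
+}
+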
+@llvm_mips_fexupl_w_ARG1 = global <8 x half> <half 0.000000e+00, half 1.000000e+00, half 2.000000e+00, half 3.000000e+00, half 4.000000e+00, half 5.000000e+00, half 6.000000e+00, half 7.000000e+00>, align 16
+@llvm_mips_fexupl_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fexupl_w_test() nounwind {
+entry:
+ %0 = load <8 x half>* @llvm_mips_fexupl_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_fexupl_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fexupl.w(<8 x half>) nounwind
+
+; CHECK: llvm_mips_fexupl_w_test:
+; CHECK: ld.h
+; CHECK: fexupl.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fexupl_w_test
+;
+@llvm_mips_fexupl_d_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fexupl_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fexupl_d_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fexupl_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.fexupl.d(<4 x float> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_fexupl_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fexupl.d(<4 x float>) nounwind
+
+; CHECK: llvm_mips_fexupl_d_test:
+; CHECK: ld.w
+; CHECK: fexupl.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fexupl_d_test
+;
+@llvm_mips_fexupr_w_ARG1 = global <8 x half> <half 0.000000e+00, half 1.000000e+00, half 2.000000e+00, half 3.000000e+00, half 4.000000e+00, half 5.000000e+00, half 6.000000e+00, half 7.000000e+00>, align 16
+@llvm_mips_fexupr_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fexupr_w_test() nounwind {
+entry:
+ %0 = load <8 x half>* @llvm_mips_fexupr_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_fexupr_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fexupr.w(<8 x half>) nounwind
+
+; CHECK: llvm_mips_fexupr_w_test:
+; CHECK: ld.h
+; CHECK: fexupr.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fexupr_w_test
+;
+@llvm_mips_fexupr_d_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fexupr_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fexupr_d_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fexupr_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.fexupr.d(<4 x float> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_fexupr_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fexupr.d(<4 x float>) nounwind
+
+; CHECK: llvm_mips_fexupr_d_test:
+; CHECK: ld.w
+; CHECK: fexupr.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fexupr_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_float_int.ll b/test/CodeGen/Mips/msa/2rf_float_int.ll
new file mode 100644
index 0000000..3b5dfda
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_float_int.ll
@@ -0,0 +1,90 @@
+; Test the MSA integer to floating-point conversion intrinsics that are encoded
+; with the 2RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
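+; ffint_s and ffint_u convert each element from a signed or unsigned integer
+; to floating point, matching the generic sitofp and uitofp IR instructions.
+; A minimal sketch of the ffint_s.w semantics (illustrative only; it is not
+; FileCheck-verified and no claim is made here that generic IR selects to
+; ffint_s.w):
+define <4 x float> @ffint_s_w_sketch(<4 x i32> %a) nounwind {
+entry:
+  %0 = sitofp <4 x i32> %a to <4 x float>
+  ret <4 x float> %0
+}
+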
+@llvm_mips_ffint_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ffint_s_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_ffint_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ffint_s_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.ffint.s.w(<4 x i32> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_ffint_s_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.ffint.s.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_ffint_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ffint_s_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ffint_s.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ffint_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ffint_s_w_test
+;
+@llvm_mips_ffint_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ffint_s_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_ffint_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ffint_s_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.ffint.s.d(<2 x i64> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_ffint_s_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.ffint.s.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_ffint_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ffint_s_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ffint_s.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ffint_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ffint_s_d_test
+;
+@llvm_mips_ffint_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ffint_u_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_ffint_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ffint_u_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.ffint.u.w(<4 x i32> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_ffint_u_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.ffint.u.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_ffint_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ffint_u_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ffint_u.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ffint_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ffint_u_w_test
+;
+@llvm_mips_ffint_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ffint_u_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_ffint_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ffint_u_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.ffint.u.d(<2 x i64> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_ffint_u_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.ffint.u.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_ffint_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ffint_u_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ffint_u.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ffint_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ffint_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_fq.ll b/test/CodeGen/Mips/msa/2rf_fq.ll
new file mode 100644
index 0000000..021dd93
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_fq.ll
@@ -0,0 +1,82 @@
+; Test the MSA fixed-point to floating-point conversion intrinsics that are
+; encoded with the 2RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
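+; ffql and ffqr convert the left and right halves of a vector of Q15 (.w) or
+; Q31 (.d) fixed-point values to floating point: each element x maps to
+; x * 2^-15 or x * 2^-31. A generic-IR sketch of the ffql.w semantics,
+; assuming little-endian lane numbering (illustrative only, not
+; FileCheck-verified):
+define <4 x float> @ffql_w_sketch(<8 x i16> %a) nounwind {
+entry:
+  %hi = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %f = sitofp <4 x i16> %hi to <4 x float>
+  ; scale by 2^-15 (0x3F00000000000000 is the IEEE-754 double encoding of 2^-15)
+  %q = fmul <4 x float> %f, <float 0x3F00000000000000, float 0x3F00000000000000, float 0x3F00000000000000, float 0x3F00000000000000>
+  ret <4 x float> %q
+}
+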
+@llvm_mips_ffql_w_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ffql_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_ffql_w_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ffql_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.ffql.w(<8 x i16> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_ffql_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.ffql.w(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_ffql_w_test:
+; CHECK: ld.h
+; CHECK: ffql.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ffql_w_test
+;
+@llvm_mips_ffql_d_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ffql_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_ffql_d_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ffql_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.ffql.d(<4 x i32> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_ffql_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.ffql.d(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_ffql_d_test:
+; CHECK: ld.w
+; CHECK: ffql.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ffql_d_test
+;
+@llvm_mips_ffqr_w_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ffqr_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_ffqr_w_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ffqr_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.ffqr.w(<8 x i16> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_ffqr_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.ffqr.w(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_ffqr_w_test:
+; CHECK: ld.h
+; CHECK: ffqr.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ffqr_w_test
+;
+@llvm_mips_ffqr_d_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ffqr_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_ffqr_d_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ffqr_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.ffqr.d(<4 x i32> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_ffqr_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.ffqr.d(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_ffqr_d_test:
+; CHECK: ld.w
+; CHECK: ffqr.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ffqr_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_int_float.ll b/test/CodeGen/Mips/msa/2rf_int_float.ll
new file mode 100644
index 0000000..4665ae0
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_int_float.ll
@@ -0,0 +1,217 @@
+; Test the MSA floating-point to integer intrinsics that are encoded with the
+; 2RF instruction format. This is mostly conversions, but other instructions
+; such as fclass are also tested here.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
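+; ftrunc_s and ftrunc_u round toward zero, matching the generic fptosi and
+; fptoui IR instructions; ftint_s and ftint_u instead round according to the
+; current MSACSR rounding mode, so they have no exact generic-IR equivalent.
+; A sketch of the ftrunc_s.w semantics (illustrative only, not
+; FileCheck-verified):
+define <4 x i32> @ftrunc_s_w_sketch(<4 x float> %a) nounwind {
+entry:
+  %0 = fptosi <4 x float> %a to <4 x i32>
+  ret <4 x i32> %0
+}
+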
+@llvm_mips_fclass_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fclass_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fclass_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fclass_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.fclass.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_fclass_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fclass.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_fclass_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fclass_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fclass.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fclass_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_fclass_w_test
+;
+@llvm_mips_fclass_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fclass_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fclass_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fclass_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.fclass.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_fclass_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fclass.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_fclass_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fclass_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fclass.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fclass_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_fclass_d_test
+;
+@llvm_mips_ftrunc_s_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftrunc_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftrunc_s_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftrunc_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_ftrunc_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftrunc_s_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftrunc_s.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftrunc_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftrunc_s_w_test
+;
+@llvm_mips_ftrunc_s_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftrunc_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ftrunc_s_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftrunc_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_ftrunc_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftrunc_s_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftrunc_s.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftrunc_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftrunc_s_d_test
+;
+@llvm_mips_ftrunc_u_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftrunc_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftrunc_u_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftrunc_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_ftrunc_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftrunc_u_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftrunc_u.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftrunc_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftrunc_u_w_test
+;
+@llvm_mips_ftrunc_u_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftrunc_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ftrunc_u_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftrunc_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_ftrunc_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftrunc_u_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftrunc_u.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftrunc_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftrunc_u_d_test
+;
+@llvm_mips_ftint_s_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftint_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftint_s_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftint_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ftint.s.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftint.s.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_ftint_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftint_s_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftint_s.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftint_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftint_s_w_test
+;
+@llvm_mips_ftint_s_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftint_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ftint_s_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftint_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ftint.s.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ftint.s.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_ftint_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftint_s_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftint_s.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftint_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftint_s_d_test
+;
+@llvm_mips_ftint_u_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftint_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftint_u_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftint_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ftint.u.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftint.u.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_ftint_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftint_u_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftint_u.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftint_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftint_u_w_test
+;
+@llvm_mips_ftint_u_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftint_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ftint_u_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftint_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ftint.u.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ftint.u.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_ftint_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftint_u_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftint_u.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftint_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftint_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_tq.ll b/test/CodeGen/Mips/msa/2rf_tq.ll
new file mode 100644
index 0000000..6f3c508
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_tq.ll
@@ -0,0 +1,50 @@
+; Test the MSA floating-point to fixed-point conversion intrinsics that are
+; encoded with the 2RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
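+; ftq converts floating point to fixed point: each element x maps to
+; saturate(round(x * 2^15)) for ftq.h (Q15) or saturate(round(x * 2^31)) for
+; ftq.w (Q31), and the two source vectors are packed into one result vector
+; of half-width elements. The saturating, rounding-mode-dependent behaviour
+; has no generic-IR equivalent, so only the intrinsic form is tested.
+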
+@llvm_mips_ftq_h_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftq_h_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_ftq_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ftq_h_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftq_h_ARG1
+ %1 = load <4 x float>* @llvm_mips_ftq_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ftq.h(<4 x float> %0, <4 x float> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ftq_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ftq.h(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_ftq_h_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ftq.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ftq_h_test
+;
+@llvm_mips_ftq_w_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftq_w_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_ftq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftq_w_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftq_w_ARG1
+ %1 = load <2 x double>* @llvm_mips_ftq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ftq.w(<2 x double> %0, <2 x double> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ftq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftq.w(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_ftq_w_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ftq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ftq_w_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-a.ll b/test/CodeGen/Mips/msa/3r-a.ll
new file mode 100644
index 0000000..dab15b6
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-a.ll
@@ -0,0 +1,1191 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are many of these, so this file covers the ones beginning with 'a'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+; Compilation should fail when +msa is given without +fp64.
+; RUN: not llc -march=mips -mattr=+msa < %s 2>&1 | \
+; RUN: FileCheck -check-prefix=FP32ERROR %s
+; FP32ERROR: LLVM ERROR: MSA requires a 64-bit FPU register file (FR=1 mode).
+
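+; Of the intrinsics below, only addv corresponds directly to a generic IR
+; instruction (plain add, exercised by the addv_*_test functions later in
+; this file); the rest layer absolute-value, saturation, or averaging
+; behaviour on top. For example, adds_u.b is an unsigned saturating add; a
+; generic-IR sketch of that semantics (illustrative only, not
+; FileCheck-verified and not claimed to select to adds_u.b):
+define <16 x i8> @adds_u_b_sketch(<16 x i8> %a, <16 x i8> %b) nounwind {
+entry:
+  %sum = add <16 x i8> %a, %b
+  ; an unsigned add wrapped iff the result compares below either operand
+  %wrap = icmp ult <16 x i8> %sum, %a
+  %sat = select <16 x i1> %wrap, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %sum
+  ret <16 x i8> %sat
+}
+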
+@llvm_mips_add_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_add_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_add_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_add_a_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_add_a_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_add_a_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.add.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_add_a_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_add_a_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_add_a_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: add_a.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_add_a_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_add_a_b_test
+;
+@llvm_mips_add_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_add_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_add_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_add_a_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_add_a_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_add_a_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.add.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_add_a_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_add_a_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_add_a_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: add_a.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_add_a_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_add_a_h_test
+;
+@llvm_mips_add_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_add_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_add_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_add_a_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_add_a_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_add_a_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.add.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_add_a_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_add_a_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_add_a_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: add_a.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_add_a_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_add_a_w_test
+;
+@llvm_mips_add_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_add_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_add_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_add_a_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_add_a_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_add_a_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.add.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_add_a_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_add_a_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_add_a_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: add_a.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_add_a_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_add_a_d_test
+;
+@llvm_mips_adds_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_adds_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_adds_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_adds_a_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_adds_a_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_adds_a_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.adds.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_adds_a_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_a_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_a_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_a.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_a_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_a_b_test
+;
+@llvm_mips_adds_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_adds_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_adds_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_adds_a_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_adds_a_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_adds_a_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.adds.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_adds_a_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_a_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_a_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_a.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_a_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_a_h_test
+;
+@llvm_mips_adds_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_adds_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_adds_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_adds_a_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_adds_a_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_adds_a_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_a_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.adds.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_adds_a_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_a_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_a_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_a.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_a_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_a_w_test
+;
+@llvm_mips_adds_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_adds_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_adds_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_adds_a_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_adds_a_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_adds_a_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.adds.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_adds_a_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_a_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_a_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_a.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_a_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_a_d_test
+;
+@llvm_mips_adds_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_adds_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_adds_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_adds_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_adds_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_adds_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.adds.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_adds_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_s_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_s_b_test
+;
+@llvm_mips_adds_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_adds_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_adds_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_adds_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_adds_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_adds_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.adds.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_adds_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_s_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_s_h_test
+;
+@llvm_mips_adds_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_adds_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_adds_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_adds_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_adds_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_adds_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.adds.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_adds_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_s_w_test
+;
+@llvm_mips_adds_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_adds_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_adds_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_adds_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_adds_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_adds_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.adds.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_adds_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_s_d_test
+;
+@llvm_mips_adds_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_adds_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_adds_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_adds_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_adds_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_adds_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.adds.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_adds_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_u_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_u_b_test
+;
+@llvm_mips_adds_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_adds_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_adds_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_adds_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_adds_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_adds_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.adds.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_adds_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_u_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_u_h_test
+;
+@llvm_mips_adds_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_adds_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_adds_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_adds_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_adds_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_adds_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.adds.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_adds_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_u_w_test
+;
+@llvm_mips_adds_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_adds_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_adds_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_adds_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_adds_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_adds_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.adds.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_adds_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_u_d_test
+;
+@llvm_mips_addv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_addv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_addv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_addv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_addv_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_addv_b_test
+;
+@llvm_mips_addv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_addv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_addv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_addv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_addv_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_addv_h_test
+;
+@llvm_mips_addv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_addv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_addv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_addv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_addv_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_addv_w_test
+;
+@llvm_mips_addv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_addv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_addv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_addv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_addv_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_addv_d_test
+;
+
+define void @addv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
+ %2 = add <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
+ ret void
+}
+
+; CHECK: addv_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size addv_b_test
+;
+
+define void @addv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
+ %2 = add <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
+ ret void
+}
+
+; CHECK: addv_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size addv_h_test
+;
+
+define void @addv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
+ %2 = add <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
+ ret void
+}
+
+; CHECK: addv_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size addv_w_test
+;
+
+define void @addv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
+ %2 = add <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
+ ret void
+}
+
+; CHECK: addv_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size addv_d_test
+;
+@llvm_mips_asub_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_asub_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_asub_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_asub_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_asub_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_asub_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.asub.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_asub_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_s_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_s_b_test
+;
+@llvm_mips_asub_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_asub_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_asub_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_asub_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_asub_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_asub_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.asub.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_asub_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_s_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_s_h_test
+;
+@llvm_mips_asub_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_asub_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_asub_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_asub_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_asub_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_asub_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.asub.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_asub_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_s_w_test
+;
+@llvm_mips_asub_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_asub_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_asub_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_asub_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_asub_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_asub_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.asub.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_asub_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_s_d_test
+;
+@llvm_mips_asub_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_asub_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_asub_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_asub_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_asub_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_asub_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.asub.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_asub_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_u_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_u_b_test
+;
+@llvm_mips_asub_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_asub_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_asub_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_asub_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_asub_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_asub_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.asub.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_asub_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_u_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_u_h_test
+;
+@llvm_mips_asub_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_asub_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_asub_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_asub_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_asub_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_asub_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.asub.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_asub_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_u_w_test
+;
+@llvm_mips_asub_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_asub_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_asub_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_asub_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_asub_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_asub_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.asub.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_asub_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_u_d_test
+;
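+; Per the MSA reference, ave_s.df/ave_u.df compute a truncating average,
+; (ws + wt) >> 1, with the sum taken at full precision so no carry is lost.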
+@llvm_mips_ave_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ave_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ave_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ave_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ave_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ave_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ave.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ave_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_s_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_s_b_test
+;
+@llvm_mips_ave_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ave_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ave_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ave_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ave_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ave_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ave.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ave_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_s_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_s_h_test
+;
+@llvm_mips_ave_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ave_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ave_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ave_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ave_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ave_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ave.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ave_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_s_w_test
+;
+@llvm_mips_ave_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ave_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ave_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ave_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ave_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ave_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ave.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ave_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_s_d_test
+;
+@llvm_mips_ave_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ave_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ave_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ave_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ave_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ave_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ave.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ave_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_u_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_u_b_test
+;
+@llvm_mips_ave_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ave_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ave_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ave_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ave_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ave_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ave.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ave_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_u_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_u_h_test
+;
+@llvm_mips_ave_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ave_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ave_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ave_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ave_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ave_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ave.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ave_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_u_w_test
+;
+@llvm_mips_ave_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ave_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ave_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ave_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ave_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ave_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ave.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ave_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_u_d_test
+;
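+; aver_s.df/aver_u.df are the rounding counterparts of ave_s/ave_u,
+; computing (ws + wt + 1) >> 1 in signed/unsigned arithmetic.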
+@llvm_mips_aver_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_aver_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_aver_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_aver_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_aver_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_aver_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.aver.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_aver_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_s_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_s_b_test
+;
+@llvm_mips_aver_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_aver_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_aver_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_aver_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_aver_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_aver_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.aver.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_aver_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_s_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_s_h_test
+;
+@llvm_mips_aver_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_aver_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_aver_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_aver_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_aver_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_aver_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.aver.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_aver_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_s_w_test
+;
+@llvm_mips_aver_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_aver_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_aver_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_aver_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_aver_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_aver_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.aver.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_aver_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_s_d_test
+;
+@llvm_mips_aver_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_aver_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_aver_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_aver_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_aver_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_aver_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.aver.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_aver_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_u_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_u_b_test
+;
+@llvm_mips_aver_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_aver_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_aver_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_aver_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_aver_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_aver_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.aver.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_aver_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_u_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_u_h_test
+;
+@llvm_mips_aver_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_aver_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_aver_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_aver_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_aver_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_aver_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.aver.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_aver_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_u_w_test
+;
+@llvm_mips_aver_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_aver_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_aver_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_aver_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_aver_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_aver_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.aver.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_aver_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-b.ll b/test/CodeGen/Mips/msa/3r-b.ll
new file mode 100644
index 0000000..a05d19b
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-b.ll
@@ -0,0 +1,494 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 'b'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
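+; Per the MSA reference, bclr.df clears one bit in each element of ws; the
+; bit position comes from the corresponding wt element modulo the element
+; width. The bneg and bset groups further down address bits the same way.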
+@llvm_mips_bclr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bclr_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bclr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bclr_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bclr_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bclr_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bclr.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_bclr_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: bclr.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bclr_b_test
+;
+@llvm_mips_bclr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bclr_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bclr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bclr_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bclr_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bclr_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bclr.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_bclr_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: bclr.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bclr_h_test
+;
+@llvm_mips_bclr_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bclr_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bclr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bclr_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bclr_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bclr_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_bclr_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: bclr.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bclr_w_test
+;
+@llvm_mips_bclr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bclr_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bclr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bclr_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bclr_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bclr_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bclr.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_bclr_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: bclr.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bclr_d_test
+
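+; The binsl/binsr intrinsics take a third operand because the destination is
+; also a source: per the MSA reference, binsl.df copies the left-most
+; (most-significant) bits of ws into wd and binsr.df the right-most bits,
+; with the bit count selected by wt. The CHECK lines below therefore verify
+; that the register loaded from ARG1 is both an input to the instruction and
+; the value stored to RES.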
+@llvm_mips_binsl_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsl_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsl_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_binsl_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_binsl_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_binsl_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_binsl_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_binsl_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.binsl.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_binsl_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.binsl.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_binsl_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsl_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsl_b_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsl_b_ARG3)(
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsl.b [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.b [[R4]], 0(
+; CHECK: .size llvm_mips_binsl_b_test
+
+@llvm_mips_binsl_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsl_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsl_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_binsl_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_binsl_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_binsl_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_binsl_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_binsl_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.binsl.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_binsl_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.binsl.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_binsl_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsl_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsl_h_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsl_h_ARG3)(
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.h [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsl.h [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.h [[R4]], 0(
+; CHECK: .size llvm_mips_binsl_h_test
+
+@llvm_mips_binsl_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsl_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsl_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_binsl_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_binsl_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_binsl_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_binsl_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_binsl_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.binsl.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_binsl_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.binsl.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_binsl_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsl_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsl_w_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsl_w_ARG3)(
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.w [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsl.w [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.w [[R4]], 0(
+; CHECK: .size llvm_mips_binsl_w_test
+
+@llvm_mips_binsl_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsl_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsl_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_binsl_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_binsl_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_binsl_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_binsl_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_binsl_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.binsl.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_binsl_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.binsl.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_binsl_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsl_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsl_d_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsl_d_ARG3)(
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.d [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsl.d [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.d [[R4]], 0(
+; CHECK: .size llvm_mips_binsl_d_test
+
+@llvm_mips_binsr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsr_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsr_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_binsr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_binsr_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_binsr_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_binsr_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_binsr_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.binsr.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_binsr_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.binsr.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_binsr_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsr_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsr_b_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsr_b_ARG3)(
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsr.b [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.b [[R4]], 0(
+; CHECK: .size llvm_mips_binsr_b_test
+
+@llvm_mips_binsr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsr_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsr_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_binsr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_binsr_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_binsr_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_binsr_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_binsr_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.binsr.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_binsr_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.binsr.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_binsr_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsr_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsr_h_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsr_h_ARG3)(
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.h [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsr.h [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.h [[R4]], 0(
+; CHECK: .size llvm_mips_binsr_h_test
+
+@llvm_mips_binsr_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsr_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsr_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_binsr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_binsr_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_binsr_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_binsr_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_binsr_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.binsr.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_binsr_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.binsr.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_binsr_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsr_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsr_w_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsr_w_ARG3)(
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.w [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsr.w [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.w [[R4]], 0(
+; CHECK: .size llvm_mips_binsr_w_test
+
+@llvm_mips_binsr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsr_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsr_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_binsr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_binsr_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_binsr_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_binsr_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_binsr_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.binsr.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_binsr_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.binsr.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_binsr_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsr_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsr_d_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsr_d_ARG3)(
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.d [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsr.d [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.d [[R4]], 0(
+; CHECK: .size llvm_mips_binsr_d_test
+
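+; bneg and bset flip and set, respectively, the bit selected as described in
+; the note at the top of this file.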
+@llvm_mips_bneg_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bneg_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bneg_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bneg_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bneg_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bneg_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bneg.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_bneg_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: bneg.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bneg_b_test
+;
+@llvm_mips_bneg_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bneg_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bneg_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bneg_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bneg_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bneg_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bneg.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_bneg_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: bneg.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bneg_h_test
+;
+@llvm_mips_bneg_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bneg_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bneg_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bneg_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bneg_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bneg_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_bneg_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: bneg.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bneg_w_test
+;
+@llvm_mips_bneg_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bneg_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bneg_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bneg_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bneg_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bneg_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bneg.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_bneg_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: bneg.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bneg_d_test
+;
+@llvm_mips_bset_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bset_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bset_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bset_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bset_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bset_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bset.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_bset_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: bset.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bset_b_test
+;
+@llvm_mips_bset_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bset_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bset_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bset_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bset_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bset_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bset.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_bset_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: bset.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bset_h_test
+;
+@llvm_mips_bset_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bset_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bset_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bset_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bset_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bset_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_bset_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: bset.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bset_w_test
+;
+@llvm_mips_bset_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bset_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bset_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bset_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bset_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bset_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bset.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_bset_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: bset.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bset_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-c.ll b/test/CodeGen/Mips/msa/3r-c.ll
new file mode 100644
index 0000000..6ec92c2
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-c.ll
@@ -0,0 +1,446 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 'c'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
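+; These are all per-element comparisons: per the MSA reference, ceq.df sets
+; an element of wd to all ones when the corresponding ws and wt elements are
+; equal, and to all zeros otherwise.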
+@llvm_mips_ceq_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ceq_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ceq_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ceq_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ceq_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ceq_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ceq.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ceq_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ceq.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ceq_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ceq.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ceq_b_test
+;
+@llvm_mips_ceq_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ceq_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ceq_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ceq_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ceq_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ceq_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ceq.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ceq_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ceq.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ceq_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ceq.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ceq_h_test
+;
+@llvm_mips_ceq_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ceq_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ceq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ceq_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ceq_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ceq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ceq.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ceq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ceq.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ceq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ceq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ceq_w_test
+;
+@llvm_mips_ceq_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ceq_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ceq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ceq_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ceq_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ceq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ceq.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ceq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ceq.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ceq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ceq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ceq_d_test
+;
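+; cle_s.df/cle_u.df produce the same all-ones/all-zeros mask for the signed/
+; unsigned "less than or equal" relation.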
+@llvm_mips_cle_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_cle_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_cle_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_cle_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_cle_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_cle_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.cle.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.cle.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_cle_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: cle_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_cle_s_b_test
+;
+@llvm_mips_cle_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_cle_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_cle_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_cle_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_cle_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_cle_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.cle.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.cle.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_cle_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: cle_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_cle_s_h_test
+;
+@llvm_mips_cle_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_cle_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_cle_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_cle_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_cle_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_cle_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.cle.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.cle.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_cle_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: cle_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_cle_s_w_test
+;
+@llvm_mips_cle_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_cle_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_cle_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_cle_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_cle_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_cle_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.cle.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.cle.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_cle_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: cle_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_cle_s_d_test
+;
+@llvm_mips_cle_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_cle_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_cle_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_cle_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_cle_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_cle_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.cle.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.cle.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_cle_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: cle_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_cle_u_b_test
+;
+@llvm_mips_cle_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_cle_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_cle_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_cle_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_cle_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_cle_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.cle.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.cle.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_cle_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: cle_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_cle_u_h_test
+;
+@llvm_mips_cle_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_cle_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_cle_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_cle_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_cle_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_cle_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.cle.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.cle.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_cle_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: cle_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_cle_u_w_test
+;
+@llvm_mips_cle_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_cle_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_cle_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_cle_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_cle_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_cle_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.cle.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.cle.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_cle_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: cle_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_cle_u_d_test
+;
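+; clt_s.df/clt_u.df do likewise for the strict signed/unsigned "less than"
+; relation.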
+@llvm_mips_clt_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clt_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_clt_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clt_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clt_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_clt_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.clt.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clt.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_clt_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: clt_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clt_s_b_test
+;
+@llvm_mips_clt_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clt_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_clt_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clt_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clt_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_clt_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.clt.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clt.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_clt_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: clt_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clt_s_h_test
+;
+@llvm_mips_clt_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clt_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_clt_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clt_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clt_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_clt_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.clt.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clt.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_clt_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: clt_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clt_s_w_test
+;
+@llvm_mips_clt_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clt_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_clt_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clt_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clt_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_clt_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.clt.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clt.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_clt_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: clt_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clt_s_d_test
+;
+@llvm_mips_clt_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clt_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_clt_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clt_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clt_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_clt_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.clt.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clt.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_clt_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: clt_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clt_u_b_test
+;
+@llvm_mips_clt_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clt_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_clt_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clt_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clt_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_clt_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.clt.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clt.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_clt_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: clt_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clt_u_h_test
+;
+@llvm_mips_clt_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clt_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_clt_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clt_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clt_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_clt_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.clt.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clt.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_clt_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: clt_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clt_u_w_test
+;
+@llvm_mips_clt_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clt_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_clt_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clt_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clt_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_clt_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.clt.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clt.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_clt_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: clt_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clt_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-d.ll b/test/CodeGen/Mips/msa/3r-d.ll
new file mode 100644
index 0000000..0099554
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-d.ll
@@ -0,0 +1,478 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers the ones beginning with 'd'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
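For context, these intrinsics are normally reached from C via the MSA builtins. A minimal sketch, not part of this patch, assuming a compiler with MSA builtin support (GCC or Clang with -mmsa -mfp64); the __builtin_msa_div_s_b builtin and the vector typedef follow the GNU vector extensions:

typedef signed char v16i8 __attribute__((vector_size(16)));

/* Per-lane signed division; selects the MSA div_s.b instruction,
   i.e. the llvm.mips.div.s.b intrinsic tested below. */
v16i8 divide_bytes(v16i8 a, v16i8 b) {
  return __builtin_msa_div_s_b(a, b);
}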
+@llvm_mips_div_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_div_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_div_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_div_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_div_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: div_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_div_s_b_test
+;
+@llvm_mips_div_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_div_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_div_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_div_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_div_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: div_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_div_s_h_test
+;
+@llvm_mips_div_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_div_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_div_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_div_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_div_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: div_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_div_s_w_test
+;
+@llvm_mips_div_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_div_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_div_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_div_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_div_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: div_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_div_s_d_test
+;
+
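+; The tests below use a plain IR 'sdiv' on vectors rather than the intrinsic
+; and check that it is selected to the same div_s.[bhwd] instructions.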
+define void @div_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %2 = sdiv <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
+ ret void
+}
+
+; CHECK: div_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: div_s.b
+; CHECK: st.b
+; CHECK: .size div_s_b_test
+
+define void @div_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %2 = sdiv <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
+ ret void
+}
+
+; CHECK: div_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: div_s.h
+; CHECK: st.h
+; CHECK: .size div_s_h_test
+
+define void @div_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %2 = sdiv <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
+ ret void
+}
+
+; CHECK: div_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: div_s.w
+; CHECK: st.w
+; CHECK: .size div_s_w_test
+
+define void @div_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %2 = sdiv <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
+ ret void
+}
+
+; CHECK: div_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: div_s.d
+; CHECK: st.d
+; CHECK: .size div_s_d_test
+;
+@llvm_mips_div_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_div_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_div_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_div_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_div_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: div_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_div_u_b_test
+;
+@llvm_mips_div_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_div_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_div_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_div_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_div_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: div_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_div_u_h_test
+;
+@llvm_mips_div_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_div_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_div_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_div_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_div_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: div_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_div_u_w_test
+;
+@llvm_mips_div_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_div_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_div_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_div_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_div_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: div_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_div_u_d_test
+;
+
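+; As above, the tests below use a plain IR 'udiv' on vectors and check that
+; it is selected to div_u.[bhwd].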
+define void @div_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %2 = udiv <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
+ ret void
+}
+
+; CHECK: div_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: div_u.b
+; CHECK: st.b
+; CHECK: .size div_u_b_test
+
+define void @div_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %2 = udiv <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
+ ret void
+}
+
+; CHECK: div_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: div_u.h
+; CHECK: st.h
+; CHECK: .size div_u_h_test
+
+define void @div_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %2 = udiv <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
+ ret void
+}
+
+; CHECK: div_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: div_u.w
+; CHECK: st.w
+; CHECK: .size div_u_w_test
+
+define void @div_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %2 = udiv <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
+ ret void
+}
+
+; CHECK: div_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: div_u.d
+; CHECK: st.d
+; CHECK: .size div_u_d_test
+;
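+; Note that the dotp_[su] intrinsics widen the element type: each pair of
+; elements is multiplied and the products are summed, so <16 x i8> operands
+; produce an <8 x i16> result, and so on for the wider formats.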
+@llvm_mips_dotp_s_h_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3,
+ i8 4, i8 5, i8 6, i8 7,
+ i8 8, i8 9, i8 10, i8 11,
+ i8 12, i8 13, i8 14, i8 15>,
+ align 16
+@llvm_mips_dotp_s_h_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19,
+ i8 20, i8 21, i8 22, i8 23,
+ i8 24, i8 25, i8 26, i8 27,
+ i8 28, i8 29, i8 30, i8 31>,
+ align 16
+@llvm_mips_dotp_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0,
+ i16 0, i16 0, i16 0, i16 0>,
+ align 16
+
+define void @llvm_mips_dotp_s_h_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.dotp.s.h(<16 x i8> %0, <16 x i8> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dotp.s.h(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dotp_s_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: dotp_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dotp_s_h_test
+;
+@llvm_mips_dotp_s_w_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3,
+ i16 4, i16 5, i16 6, i16 7>,
+ align 16
+@llvm_mips_dotp_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7,
+ i16 8, i16 9, i16 10, i16 11>,
+ align 16
+@llvm_mips_dotp_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>,
+ align 16
+
+define void @llvm_mips_dotp_s_w_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.dotp.s.w(<8 x i16> %0, <8 x i16> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dotp.s.w(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dotp_s_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: dotp_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dotp_s_w_test
+;
+@llvm_mips_dotp_s_d_ARG1 = global <4 x i32> <i32 0, i32 1, i32 0, i32 1>,
+ align 16
+@llvm_mips_dotp_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 2, i32 3>,
+ align 16
+@llvm_mips_dotp_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dotp_s_d_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.dotp.s.d(<4 x i32> %0, <4 x i32> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dotp.s.d(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dotp_s_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: dotp_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dotp_s_d_test
+;
+@llvm_mips_dotp_u_h_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3,
+ i8 4, i8 5, i8 6, i8 7,
+ i8 8, i8 9, i8 10, i8 11,
+ i8 12, i8 13, i8 14, i8 15>,
+ align 16
+@llvm_mips_dotp_u_h_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19,
+ i8 20, i8 21, i8 22, i8 23,
+ i8 24, i8 25, i8 26, i8 27,
+ i8 28, i8 29, i8 30, i8 31>,
+ align 16
+@llvm_mips_dotp_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0,
+ i16 0, i16 0, i16 0, i16 0>,
+ align 16
+
+define void @llvm_mips_dotp_u_h_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.dotp.u.h(<16 x i8> %0, <16 x i8> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dotp.u.h(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dotp_u_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: dotp_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dotp_u_h_test
+;
+@llvm_mips_dotp_u_w_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3,
+ i16 4, i16 5, i16 6, i16 7>,
+ align 16
+@llvm_mips_dotp_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7,
+ i16 8, i16 9, i16 10, i16 11>,
+ align 16
+@llvm_mips_dotp_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>,
+ align 16
+
+define void @llvm_mips_dotp_u_w_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.dotp.u.w(<8 x i16> %0, <8 x i16> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dotp.u.w(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dotp_u_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: dotp_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dotp_u_w_test
+;
+@llvm_mips_dotp_u_d_ARG1 = global <4 x i32> <i32 0, i32 1, i32 0, i32 1>,
+ align 16
+@llvm_mips_dotp_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 2, i32 3>,
+ align 16
+@llvm_mips_dotp_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dotp_u_d_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.dotp.u.d(<4 x i32> %0, <4 x i32> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dotp.u.d(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dotp_u_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: dotp_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dotp_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-i.ll b/test/CodeGen/Mips/msa/3r-i.ll
new file mode 100644
index 0000000..2ef3047
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-i.ll
@@ -0,0 +1,358 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers the ones beginning with 'i'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
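For context, ilvev interleaves the even-indexed lanes of its two source vectors. A minimal sketch, not part of this patch, assuming the same MSA builtin support as above (__builtin_msa_ilvev_b):

typedef signed char v16i8 __attribute__((vector_size(16)));

/* Interleaves the even lanes of the two operands; selects ilvev.b,
   i.e. the llvm.mips.ilvev.b intrinsic tested below. */
v16i8 interleave_even(v16i8 a, v16i8 b) {
  return __builtin_msa_ilvev_b(a, b);
}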
+@llvm_mips_ilvev_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ilvev_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ilvev_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ilvev_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ilvev_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ilvev_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ilvev.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvev_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ilvev.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ilvev_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ilvev.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ilvev_b_test
+;
+@llvm_mips_ilvev_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ilvev_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ilvev_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ilvev_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ilvev_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ilvev_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ilvev.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvev_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ilvev.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ilvev_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ilvev.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ilvev_h_test
+;
+@llvm_mips_ilvev_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ilvev_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ilvev_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ilvev_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ilvev_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ilvev_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ilvev.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvev_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ilvev.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ilvev_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ilvev.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ilvev_w_test
+;
+@llvm_mips_ilvev_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ilvev_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ilvev_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ilvev_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ilvev_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ilvev_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ilvev.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvev_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ilvev.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ilvev_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ilvev.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ilvev_d_test
+;
+@llvm_mips_ilvl_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ilvl_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ilvl_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ilvl_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ilvl_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ilvl_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ilvl.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvl_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ilvl.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ilvl_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ilvl.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ilvl_b_test
+;
+@llvm_mips_ilvl_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ilvl_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ilvl_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ilvl_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ilvl_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ilvl_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ilvl.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvl_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ilvl.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ilvl_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ilvl.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ilvl_h_test
+;
+@llvm_mips_ilvl_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ilvl_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ilvl_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ilvl_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ilvl_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ilvl_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ilvl.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvl_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ilvl.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ilvl_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ilvl.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ilvl_w_test
+;
+@llvm_mips_ilvl_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ilvl_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ilvl_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ilvl_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ilvl_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ilvl_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ilvl.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvl_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ilvl.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ilvl_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ilvl.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ilvl_d_test
+;
+@llvm_mips_ilvod_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ilvod_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ilvod_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ilvod_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ilvod_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ilvod_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ilvod.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvod_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ilvod.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ilvod_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ilvod.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ilvod_b_test
+;
+@llvm_mips_ilvod_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ilvod_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ilvod_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ilvod_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ilvod_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ilvod_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ilvod.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvod_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ilvod.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ilvod_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ilvod.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ilvod_h_test
+;
+@llvm_mips_ilvod_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ilvod_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ilvod_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ilvod_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ilvod_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ilvod_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ilvod.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvod_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ilvod.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ilvod_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ilvod.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ilvod_w_test
+;
+@llvm_mips_ilvod_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ilvod_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ilvod_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ilvod_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ilvod_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ilvod_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ilvod.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvod_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ilvod.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ilvod_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ilvod.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ilvod_d_test
+;
+@llvm_mips_ilvr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ilvr_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ilvr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ilvr_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ilvr_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ilvr_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ilvr.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvr_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ilvr.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ilvr_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ilvr.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ilvr_b_test
+;
+@llvm_mips_ilvr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ilvr_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ilvr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ilvr_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ilvr_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ilvr_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ilvr.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvr_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ilvr.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ilvr_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ilvr.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ilvr_h_test
+;
+@llvm_mips_ilvr_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ilvr_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ilvr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ilvr_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ilvr_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ilvr_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ilvr.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvr_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ilvr.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ilvr_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ilvr.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ilvr_w_test
+;
+@llvm_mips_ilvr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ilvr_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ilvr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ilvr_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ilvr_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ilvr_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ilvr.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvr_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ilvr.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ilvr_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ilvr.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ilvr_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-m.ll b/test/CodeGen/Mips/msa/3r-m.ll
new file mode 100644
index 0000000..ddfd720
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-m.ll
@@ -0,0 +1,862 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers the ones beginning with 'm'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
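For context, max_a picks, per lane, the operand with the larger absolute value (keeping its sign). A minimal sketch, not part of this patch, again assuming MSA builtin support (__builtin_msa_max_a_b):

typedef signed char v16i8 __attribute__((vector_size(16)));

/* Per lane, returns whichever operand has the larger absolute value;
   selects max_a.b, i.e. the llvm.mips.max.a.b intrinsic tested below. */
v16i8 max_magnitude(v16i8 a, v16i8 b) {
  return __builtin_msa_max_a_b(a, b);
}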
+@llvm_mips_max_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_a_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_max_a_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_max_a_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.max.a.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_max_a_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.max.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_a_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_a.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_a_b_test
+;
+@llvm_mips_max_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_a_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_max_a_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_max_a_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.max.a.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_max_a_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.max.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_a_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_a.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_a_h_test
+;
+@llvm_mips_max_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_a_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_max_a_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_max_a_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.max.a.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_max_a_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.max.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_a_w_test
+;
+@llvm_mips_max_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_a_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_max_a_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_max_a_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.max.a.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_max_a_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.max.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_a_d_test
+;
+@llvm_mips_max_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_max_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_max_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.max.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_max_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.max.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_s_b_test
+;
+@llvm_mips_max_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_max_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_max_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.max.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_max_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.max.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_s_h_test
+;
+@llvm_mips_max_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_max_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_max_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.max.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_max_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.max.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_s_w_test
+;
+@llvm_mips_max_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_max_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_max_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.max.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_max_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.max.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_s_d_test
+;
+@llvm_mips_max_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_max_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_max_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.max.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_max_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.max.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_u_b_test
+;
+@llvm_mips_max_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_max_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_max_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.max.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_max_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.max.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_u_h_test
+;
+@llvm_mips_max_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_max_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_max_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.max.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_max_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.max.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_u_w_test
+;
+@llvm_mips_max_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_max_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_max_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.max.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_max_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.max.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_u_d_test
+;
+@llvm_mips_min_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_a_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_min_a_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_min_a_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.min.a.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_min_a_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.min.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_a_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_a.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_a_b_test
+;
+@llvm_mips_min_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_a_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_min_a_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_min_a_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.min.a.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_min_a_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.min.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_a_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_a.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_a_h_test
+;
+@llvm_mips_min_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_a_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_min_a_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_min_a_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.min.a.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_min_a_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.min.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_a_w_test
+;
+@llvm_mips_min_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_a_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_min_a_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_min_a_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.min.a.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_min_a_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.min.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_a_d_test
+;
+@llvm_mips_min_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_min_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_min_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.min.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_min_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.min.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_s_b_test
+;
+@llvm_mips_min_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_min_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_min_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.min.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_min_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.min.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_s_h_test
+;
+@llvm_mips_min_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_min_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_min_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.min.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_min_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.min.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_s_w_test
+;
+@llvm_mips_min_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_min_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_min_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.min.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_min_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.min.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_s_d_test
+;
+@llvm_mips_min_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_min_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_min_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.min.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_min_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.min.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_u_b_test
+;
+@llvm_mips_min_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_min_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_min_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.min.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_min_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.min.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_u_h_test
+;
+@llvm_mips_min_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_min_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_min_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.min.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_min_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.min.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_u_w_test
+;
+@llvm_mips_min_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_min_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_min_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.min.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_min_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.min.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_u_d_test
+;
+@llvm_mips_mod_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mod_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mod_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mod_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mod_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_mod_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mod_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mod_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mod_s_b_test
+;
+@llvm_mips_mod_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mod_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mod_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mod_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mod_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mod_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mod_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mod_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mod_s_h_test
+;
+@llvm_mips_mod_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mod_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mod_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mod_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mod_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mod_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mod_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mod_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mod_s_w_test
+;
+@llvm_mips_mod_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mod_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mod_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mod_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mod_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_mod_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mod_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mod_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mod_s_d_test
+;
+@llvm_mips_mod_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mod_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mod_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mod_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mod_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_mod_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mod_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mod_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mod_u_b_test
+;
+@llvm_mips_mod_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mod_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mod_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mod_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mod_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mod_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mod_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mod_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mod_u_h_test
+;
+@llvm_mips_mod_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mod_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mod_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mod_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mod_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mod_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mod_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mod_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mod_u_w_test
+;
+@llvm_mips_mod_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mod_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mod_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mod_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mod_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_mod_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mod_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mod_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mod_u_d_test
+;
+@llvm_mips_mulv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mulv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mulv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mulv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mulv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mulv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mulv_b_test
+;
+@llvm_mips_mulv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mulv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mulv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mulv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mulv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mulv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mulv_h_test
+;
+@llvm_mips_mulv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mulv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mulv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mulv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mulv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mulv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mulv_w_test
+;
+@llvm_mips_mulv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mulv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mulv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mulv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mulv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mulv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mulv_d_test
+
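+; The tests below exercise the same operation through the generic IR 'mul'
+; instruction instead of the intrinsic, checking that ordinary vector
+; multiplication also selects mulv.[bhwd].
+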
+define void @mulv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %2 = mul <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+ ret void
+}
+
+; CHECK: mulv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mulv.b
+; CHECK: st.b
+; CHECK: .size mulv_b_test
+
+define void @mulv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %2 = mul <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+ ret void
+}
+
+; CHECK: mulv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mulv.h
+; CHECK: st.h
+; CHECK: .size mulv_h_test
+
+define void @mulv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %2 = mul <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+ ret void
+}
+
+; CHECK: mulv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mulv.w
+; CHECK: st.w
+; CHECK: .size mulv_w_test
+
+define void @mulv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %2 = mul <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+ ret void
+}
+
+; CHECK: mulv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mulv.d
+; CHECK: st.d
+; CHECK: .size mulv_d_test
+;
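+
+; A sketch for the mod_s/mod_u cases above, assuming the generic IR 'srem'
+; and 'urem' vector instructions select mod_s.[bhwd] and mod_u.[bhwd] the
+; same way 'mul' selects mulv.[bhwd]; analogous coverage for the .d case
+; would be:
+
+define void @mod_s_d_srem_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_mod_s_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_mod_s_d_ARG2
+  %2 = srem <2 x i64> %0, %1
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
+  ret void
+}
+
+; CHECK: mod_s_d_srem_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mod_s.d
+; CHECK: st.d
+; CHECK: .size mod_s_d_srem_test
+
+define void @mod_u_d_urem_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_mod_u_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_mod_u_d_ARG2
+  %2 = urem <2 x i64> %0, %1
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
+  ret void
+}
+
+; CHECK: mod_u_d_urem_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mod_u.d
+; CHECK: st.d
+; CHECK: .size mod_u_d_urem_test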
diff --git a/test/CodeGen/Mips/msa/3r-p.ll b/test/CodeGen/Mips/msa/3r-p.ll
new file mode 100644
index 0000000..852023b
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-p.ll
@@ -0,0 +1,182 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 'p'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
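+
+; For reference (paraphrasing the MSA spec): pckev.df packs the even-indexed
+; elements of the two source vectors into wd, one source per half of the
+; result, and pckod.df does the same with the odd-indexed elements. E.g.
+; with the byte arguments below, pckev.b keeps bytes 0,2,..,14 from one
+; source and 16,18,..,30 from the other.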
+
+@llvm_mips_pckev_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pckev_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_pckev_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pckev_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_pckev_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_pckev_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.pckev.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_pckev_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.pckev.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_pckev_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: pckev.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_pckev_b_test
+;
+@llvm_mips_pckev_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pckev_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_pckev_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pckev_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_pckev_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_pckev_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.pckev.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_pckev_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.pckev.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_pckev_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: pckev.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_pckev_h_test
+;
+@llvm_mips_pckev_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pckev_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_pckev_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pckev_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_pckev_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_pckev_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.pckev.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_pckev_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.pckev.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_pckev_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: pckev.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_pckev_w_test
+;
+@llvm_mips_pckev_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pckev_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_pckev_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pckev_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_pckev_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_pckev_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_pckev_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.pckev.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_pckev_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: pckev.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_pckev_d_test
+;
+@llvm_mips_pckod_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pckod_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_pckod_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pckod_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_pckod_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_pckod_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.pckod.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_pckod_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.pckod.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_pckod_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: pckod.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_pckod_b_test
+;
+@llvm_mips_pckod_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pckod_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_pckod_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pckod_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_pckod_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_pckod_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.pckod.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_pckod_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.pckod.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_pckod_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: pckod.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_pckod_h_test
+;
+@llvm_mips_pckod_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pckod_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_pckod_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pckod_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_pckod_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_pckod_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.pckod.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_pckod_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.pckod.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_pckod_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: pckod.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_pckod_w_test
+;
+@llvm_mips_pckod_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pckod_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_pckod_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pckod_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_pckod_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_pckod_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.pckod.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_pckod_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.pckod.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_pckod_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: pckod.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_pckod_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-s.ll b/test/CodeGen/Mips/msa/3r-s.ll
new file mode 100644
index 0000000..30cf265
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-s.ll
@@ -0,0 +1,1353 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 's'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
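+
+; Unlike the files above, these tests also pin the operands:
+; '[[R1:\$[0-9]+]]' binds a FileCheck variable to whatever GPR the compiler
+; picked, later '[[R1]]' uses must match the same register, and CHECK-DAG
+; allows the independent loads to match in either order.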
+
+@llvm_mips_sld_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sld_b_ARG2 = global i32 10, align 16
+@llvm_mips_sld_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sld_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sld_b_ARG1
+ %1 = load i32* @llvm_mips_sld_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, i32 %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sld_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sld_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
+; CHECK-DAG: sld.b [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_sld_b_test
+;
+@llvm_mips_sld_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sld_h_ARG2 = global i32 10, align 16
+@llvm_mips_sld_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sld_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sld_h_ARG1
+ %1 = load i32* @llvm_mips_sld_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, i32 %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sld_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sld_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
+; CHECK-DAG: sld.h [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_sld_h_test
+;
+@llvm_mips_sld_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sld_w_ARG2 = global i32 10, align 16
+@llvm_mips_sld_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sld_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sld_w_ARG1
+ %1 = load i32* @llvm_mips_sld_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, i32 %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sld_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sld_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
+; CHECK-DAG: sld.w [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_sld_w_test
+;
+@llvm_mips_sld_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sld_d_ARG2 = global i32 10, align 16
+@llvm_mips_sld_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sld_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sld_d_ARG1
+ %1 = load i32* @llvm_mips_sld_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, i32 %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sld_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sld_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
+; CHECK-DAG: sld.d [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_sld_d_test
+;
+@llvm_mips_sll_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sll_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_sll_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sll_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_sll_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_sll_b_test
+;
+@llvm_mips_sll_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sll_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_sll_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sll_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_sll_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_sll_h_test
+;
+@llvm_mips_sll_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sll_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_sll_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sll_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_sll_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_sll_w_test
+;
+@llvm_mips_sll_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sll_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_sll_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sll_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_sll_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_sll_d_test
+
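+; The tests below use the generic IR 'shl' instruction instead of the
+; intrinsic, checking that ordinary vector shifts also select sll.[bhwd].
+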
+define void @sll_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+ %2 = shl <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+ ret void
+}
+
+; CHECK: sll_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size sll_b_test
+
+define void @sll_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+ %2 = shl <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+ ret void
+}
+
+; CHECK: sll_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size sll_h_test
+
+define void @sll_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+ %2 = shl <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+ ret void
+}
+
+; CHECK: sll_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size sll_w_test
+
+define void @sll_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+ %2 = shl <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+ ret void
+}
+
+; CHECK: sll_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size sll_d_test
+;
+@llvm_mips_sra_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sra_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_sra_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sra_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_sra_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_sra_b_test
+;
+@llvm_mips_sra_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sra_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_sra_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sra_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_sra_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_sra_h_test
+;
+@llvm_mips_sra_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sra_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_sra_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sra_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_sra_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_sra_w_test
+;
+@llvm_mips_sra_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sra_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_sra_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sra_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_sra_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_sra_d_test
+;
+
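+; As with 'shl' above, the tests below check that the generic IR 'ashr'
+; instruction selects sra.[bhwd].
+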
+define void @sra_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+ %2 = ashr <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+ ret void
+}
+
+; CHECK: sra_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size sra_b_test
+
+define void @sra_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+ %2 = ashr <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+ ret void
+}
+
+; CHECK: sra_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size sra_h_test
+
+define void @sra_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+ %2 = ashr <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+ ret void
+}
+
+; CHECK: sra_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size sra_w_test
+
+define void @sra_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+ %2 = ashr <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+ ret void
+}
+
+; CHECK: sra_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size sra_d_test
+
+@llvm_mips_srar_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srar_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_srar_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srar_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srar_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_srar_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.srar.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_srar_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srar.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_srar_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srar_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srar_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srar.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_srar_b_test
+;
+@llvm_mips_srar_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srar_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_srar_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srar_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srar_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_srar_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.srar.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_srar_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srar.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_srar_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srar_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srar_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srar.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_srar_h_test
+;
+@llvm_mips_srar_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srar_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_srar_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srar_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srar_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_srar_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.srar.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_srar_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srar.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_srar_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srar_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srar_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srar.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_srar_w_test
+;
+@llvm_mips_srar_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srar_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_srar_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srar_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srar_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_srar_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.srar.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_srar_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srar.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_srar_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srar_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srar_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srar.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_srar_d_test
+;
+@llvm_mips_srl_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srl_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_srl_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srl_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_srl_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_srl_b_test
+;
+@llvm_mips_srl_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srl_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_srl_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srl_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_srl_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_srl_h_test
+;
+@llvm_mips_srl_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srl_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_srl_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srl_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_srl_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_srl_w_test
+;
+@llvm_mips_srl_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srl_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_srl_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srl_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srl.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_srl_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_srl_d_test
+;
+@llvm_mips_srlr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srlr_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_srlr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srlr_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srlr_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_srlr_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.srlr.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_srlr_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srlr.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_srlr_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srlr_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srlr_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srlr.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_srlr_b_test
+;
+@llvm_mips_srlr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srlr_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_srlr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srlr_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srlr_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_srlr_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.srlr.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_srlr_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srlr.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_srlr_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srlr_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srlr_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srlr.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_srlr_h_test
+;
+@llvm_mips_srlr_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srlr_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_srlr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srlr_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srlr_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_srlr_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.srlr.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_srlr_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srlr.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_srlr_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srlr_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srlr_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srlr.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_srlr_w_test
+;
+@llvm_mips_srlr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srlr_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_srlr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srlr_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srlr_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_srlr_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.srlr.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_srlr_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srlr.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_srlr_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srlr_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srlr_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srlr.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_srlr_d_test
+;
+
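+; Likewise, the tests below check that the generic IR 'lshr' instruction
+; selects srl.[bhwd].
+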
+define void @srl_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+ %2 = lshr <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+ ret void
+}
+
+; CHECK: srl_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size srl_b_test
+
+define void @srl_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+ %2 = lshr <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+ ret void
+}
+
+; CHECK: srl_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size srl_h_test
+
+define void @srl_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+ %2 = lshr <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+ ret void
+}
+
+; CHECK: srl_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size srl_w_test
+
+define void @srl_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+ %2 = lshr <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+ ret void
+}
+
+; CHECK: srl_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size srl_d_test
+
+@llvm_mips_subs_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subs_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subs_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subs_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subs_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subs_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subs.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subs.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subs_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subs_s_b_test
+;
+@llvm_mips_subs_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subs_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subs_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subs_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subs_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subs_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subs.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subs.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subs_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subs_s_h_test
+;
+@llvm_mips_subs_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subs_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subs_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subs_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subs_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subs_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subs.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subs.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subs_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subs_s_w_test
+;
+@llvm_mips_subs_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subs_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subs_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subs_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subs_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subs_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subs.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subs.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subs_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subs_s_d_test
+;
+@llvm_mips_subs_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subs_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subs_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subs_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subs_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subs_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subs.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subs.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subs_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subs_u_b_test
+;
+@llvm_mips_subs_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subs_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subs_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subs_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subs_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subs_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subs.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subs.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subs_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subs_u_h_test
+;
+@llvm_mips_subs_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subs_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subs_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subs_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subs_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subs_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subs.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subs.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subs_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subs_u_w_test
+;
+@llvm_mips_subs_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subs_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subs_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subs_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subs_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subs_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subs.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subs.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subs_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subs_u_d_test
+;
+@llvm_mips_subsus_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subsus_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subsus_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subsus_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subsus.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subsus_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subsus.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subsus_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsus_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsus_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsus_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subsus_u_b_test
+;
+@llvm_mips_subsus_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subsus_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subsus_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subsus_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subsus.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subsus_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subsus.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subsus_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsus_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsus_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsus_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subsus_u_h_test
+;
+@llvm_mips_subsus_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subsus_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subsus_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subsus_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subsus.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subsus_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subsus.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subsus_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsus_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsus_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsus_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subsus_u_w_test
+;
+@llvm_mips_subsus_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subsus_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subsus_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subsus_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subsus.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subsus_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subsus.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subsus_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsus_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsus_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsus_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subsus_u_d_test
+;
+@llvm_mips_subsuu_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subsuu_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subsuu_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subsuu_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subsuu_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsuu_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsuu_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsuu_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subsuu_s_b_test
+;
+@llvm_mips_subsuu_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subsuu_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subsuu_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subsuu_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subsuu_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsuu_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsuu_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsuu_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subsuu_s_h_test
+;
+@llvm_mips_subsuu_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subsuu_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subsuu_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subsuu_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subsuu_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsuu_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsuu_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsuu_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subsuu_s_w_test
+;
+@llvm_mips_subsuu_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subsuu_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subsuu_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subsuu_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subsuu_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsuu_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsuu_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsuu_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subsuu_s_d_test
+;
+@llvm_mips_subv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subv_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subv_b_test
+;
+@llvm_mips_subv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subv_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subv_h_test
+;
+@llvm_mips_subv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subv_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subv_w_test
+;
+@llvm_mips_subv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subv_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subv_d_test
+;
+
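+; The tests below cover the same subv.df instructions, but through generic IR
+; 'sub' on vector types rather than the intrinsics. As the CHECK lines below
+; assert, e.g. for the byte case:
+;   %2 = sub <16 x i8> %0, %1      ; expected to select subv.b
+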
+define void @subv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+ %2 = sub <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+ ret void
+}
+
+; CHECK: subv_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size subv_b_test
+
+define void @subv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+ %2 = sub <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+ ret void
+}
+
+; CHECK: subv_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size subv_h_test
+
+define void @subv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+ %2 = sub <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+ ret void
+}
+
+; CHECK: subv_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size subv_w_test
+
+define void @subv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+ %2 = sub <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+ ret void
+}
+
+; CHECK: subv_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size subv_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-v.ll b/test/CodeGen/Mips/msa/3r-v.ll
new file mode 100644
index 0000000..c9693f9
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-v.ll
@@ -0,0 +1,105 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 'v'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
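+
+; An informal sketch of the vshf.df semantics these tests rely on (based on
+; the MSA reference manual; not something FileCheck verifies here): the first
+; vector supplies per-element control indices, and each result element is
+; selected from the concatenation of the other two vectors, roughly
+;   wd[i] <- concat(operands 2 and 3)[ctl[i] mod 2n]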
+
+@llvm_mips_vshf_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_vshf_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_vshf_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_vshf_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_vshf_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_vshf_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_vshf_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_vshf_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_vshf_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: vshf.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_vshf_b_test
+;
+@llvm_mips_vshf_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_vshf_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_vshf_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_vshf_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_vshf_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_vshf_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_vshf_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_vshf_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_vshf_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: vshf.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_vshf_h_test
+;
+@llvm_mips_vshf_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_vshf_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_vshf_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_vshf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_vshf_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_vshf_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_vshf_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_vshf_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_vshf_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: vshf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_vshf_w_test
+;
+@llvm_mips_vshf_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_vshf_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_vshf_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_vshf_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_vshf_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_vshf_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_vshf_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_vshf_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.vshf.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_vshf_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: vshf.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_vshf_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r_4r.ll b/test/CodeGen/Mips/msa/3r_4r.ll
new file mode 100644
index 0000000..b7fd728
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r_4r.ll
@@ -0,0 +1,206 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format and
+; take the previous result as a third (accumulator) operand.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
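+
+; maddv.df and msubv.df are multiply-accumulate operations. A minimal IR
+; sketch of the assumed semantics for the byte case (an informal note; this
+; file only checks instruction selection):
+;   %prod = mul <16 x i8> %1, %2
+;   %macc = add <16 x i8> %0, %prod   ; maddv.b: wd <- wd + ws * wt
+;   %msub = sub <16 x i8> %0, %prod   ; msubv.b: wd <- wd - ws * wt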
+
+@llvm_mips_maddv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maddv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_maddv_b_ARG3 = global <16 x i8> <i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39, i8 40, i8 41, i8 42, i8 43, i8 44, i8 45, i8 46, i8 47>, align 16
+@llvm_mips_maddv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maddv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_maddv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_maddv_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_maddv_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.maddv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_maddv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.maddv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_maddv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: maddv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maddv_b_test
+;
+@llvm_mips_maddv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maddv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_maddv_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_maddv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maddv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_maddv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_maddv_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_maddv_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.maddv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_maddv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.maddv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_maddv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: maddv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maddv_h_test
+;
+@llvm_mips_maddv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maddv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_maddv_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_maddv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maddv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_maddv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_maddv_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_maddv_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.maddv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_maddv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.maddv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_maddv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: maddv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maddv_w_test
+;
+@llvm_mips_maddv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maddv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_maddv_d_ARG3 = global <2 x i64> <i64 4, i64 5>, align 16
+@llvm_mips_maddv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maddv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_maddv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_maddv_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_maddv_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.maddv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_maddv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.maddv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_maddv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: maddv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maddv_d_test
+;
+@llvm_mips_msubv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_msubv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_msubv_b_ARG3 = global <16 x i8> <i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39, i8 40, i8 41, i8 42, i8 43, i8 44, i8 45, i8 46, i8 47>, align 16
+@llvm_mips_msubv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_msubv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_msubv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_msubv_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_msubv_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.msubv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_msubv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.msubv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_msubv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: msubv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_msubv_b_test
+;
+@llvm_mips_msubv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msubv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msubv_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msubv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msubv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_msubv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_msubv_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_msubv_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.msubv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_msubv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.msubv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msubv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msubv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msubv_h_test
+;
+@llvm_mips_msubv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msubv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msubv_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msubv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msubv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_msubv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_msubv_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_msubv_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.msubv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_msubv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.msubv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msubv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msubv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msubv_w_test
+;
+@llvm_mips_msubv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_msubv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_msubv_d_ARG3 = global <2 x i64> <i64 4, i64 5>, align 16
+@llvm_mips_msubv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_msubv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_msubv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_msubv_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_msubv_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.msubv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_msubv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.msubv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_msubv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: msubv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_msubv_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r_4r_widen.ll b/test/CodeGen/Mips/msa/3r_4r_widen.ll
new file mode 100644
index 0000000..7063e45
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r_4r_widen.ll
@@ -0,0 +1,307 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format,
+; take the previous result as a third (accumulator) operand, and produce
+; elements wider than those of the operands.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
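+
+; dpadd_[su].df and dpsub_[su].df are dot-product accumulate operations. A
+; hedged sketch of the assumed semantics for dpadd_s.h (dpsub subtracts the
+; product sum instead of adding it):
+;   wd[i] <- wd[i] + ws[2i]*wt[2i] + ws[2i+1]*wt[2i+1]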
+
+@llvm_mips_dpadd_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_dpadd_s_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
+@llvm_mips_dpadd_s_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
+@llvm_mips_dpadd_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_dpadd_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
+ %2 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dpadd_s_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.h
+; CHECK: dpadd_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dpadd_s_h_test
+;
+@llvm_mips_dpadd_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_dpadd_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
+@llvm_mips_dpadd_s_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
+@llvm_mips_dpadd_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_dpadd_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
+ %2 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dpadd_s_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.w
+; CHECK: dpadd_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dpadd_s_w_test
+;
+@llvm_mips_dpadd_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_dpadd_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
+@llvm_mips_dpadd_s_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
+@llvm_mips_dpadd_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dpadd_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
+ %2 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dpadd_s_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.d
+; CHECK: dpadd_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dpadd_s_d_test
+;
+@llvm_mips_dpadd_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_dpadd_u_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
+@llvm_mips_dpadd_u_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
+@llvm_mips_dpadd_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_dpadd_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
+ %2 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dpadd_u_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.h
+; CHECK: dpadd_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dpadd_u_h_test
+;
+@llvm_mips_dpadd_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_dpadd_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
+@llvm_mips_dpadd_u_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
+@llvm_mips_dpadd_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_dpadd_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
+ %2 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dpadd_u_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.w
+; CHECK: dpadd_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dpadd_u_w_test
+;
+@llvm_mips_dpadd_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_dpadd_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
+@llvm_mips_dpadd_u_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
+@llvm_mips_dpadd_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dpadd_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
+ %2 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dpadd_u_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.d
+; CHECK: dpadd_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dpadd_u_d_test
+;
+@llvm_mips_dpsub_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_dpsub_s_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
+@llvm_mips_dpsub_s_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
+@llvm_mips_dpsub_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_dpsub_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
+ %2 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dpsub_s_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.h
+; CHECK: dpsub_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dpsub_s_h_test
+;
+@llvm_mips_dpsub_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_dpsub_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
+@llvm_mips_dpsub_s_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
+@llvm_mips_dpsub_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_dpsub_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
+ %2 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dpsub_s_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.w
+; CHECK: dpsub_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dpsub_s_w_test
+;
+@llvm_mips_dpsub_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_dpsub_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
+@llvm_mips_dpsub_s_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
+@llvm_mips_dpsub_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dpsub_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
+ %2 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dpsub_s_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.d
+; CHECK: dpsub_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dpsub_s_d_test
+;
+@llvm_mips_dpsub_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_dpsub_u_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
+@llvm_mips_dpsub_u_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
+@llvm_mips_dpsub_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_dpsub_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
+ %2 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dpsub_u_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.h
+; CHECK: dpsub_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dpsub_u_h_test
+;
+@llvm_mips_dpsub_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_dpsub_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
+@llvm_mips_dpsub_u_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
+@llvm_mips_dpsub_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_dpsub_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
+ %2 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dpsub_u_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.w
+; CHECK: dpsub_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dpsub_u_w_test
+;
+@llvm_mips_dpsub_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_dpsub_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
+@llvm_mips_dpsub_u_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
+@llvm_mips_dpsub_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dpsub_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
+ %2 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dpsub_u_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.d
+; CHECK: dpsub_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dpsub_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r_splat.ll b/test/CodeGen/Mips/msa/3r_splat.ll
new file mode 100644
index 0000000..6b0cb26
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r_splat.ll
@@ -0,0 +1,94 @@
+; Test the MSA splat intrinsics that are encoded with the 3R instruction
+; format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck -check-prefix=MIPS32 %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck -check-prefix=MIPS32 %s
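+
+; splat.df replicates one source element, selected by a GPR operand, across
+; every element of the destination. A sketch of the assumed semantics, with
+; %a arriving in $4 under the O32 ABI as the CHECK lines expect:
+;   wd[i] <- ws[%a]   for every element i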
+
+@llvm_mips_splat_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_splat_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_splat_b_test(i32 %a) nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_splat_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.splat.b(<16 x i8> %0, i32 %a)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_splat_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.splat.b(<16 x i8>, i32) nounwind
+
+; MIPS32: llvm_mips_splat_b_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_splat_b_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_splat_b_RES)(
+; MIPS32-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS32-DAG: splat.b [[R4:\$w[0-9]+]], [[R3]][$4]
+; MIPS32-DAG: st.b [[R4]], 0([[R2]])
+; MIPS32: .size llvm_mips_splat_b_test
+
+@llvm_mips_splat_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_splat_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_splat_h_test(i32 %a) nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_splat_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.splat.h(<8 x i16> %0, i32 %a)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_splat_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.splat.h(<8 x i16>, i32) nounwind
+
+; MIPS32: llvm_mips_splat_h_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_splat_h_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_splat_h_RES)(
+; MIPS32-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS32-DAG: splat.h [[R4:\$w[0-9]+]], [[R3]][$4]
+; MIPS32-DAG: st.h [[R4]], 0([[R2]])
+; MIPS32: .size llvm_mips_splat_h_test
+
+@llvm_mips_splat_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_splat_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_splat_w_test(i32 %a) nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_splat_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.splat.w(<4 x i32> %0, i32 %a)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_splat_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.splat.w(<4 x i32>, i32) nounwind
+
+; MIPS32: llvm_mips_splat_w_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_splat_w_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_splat_w_RES)(
+; MIPS32-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS32-DAG: splat.w [[R4:\$w[0-9]+]], [[R3]][$4]
+; MIPS32-DAG: st.w [[R4]], 0([[R2]])
+; MIPS32: .size llvm_mips_splat_w_test
+
+@llvm_mips_splat_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_splat_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_splat_d_test(i32 %a) nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_splat_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> %0, i32 %a)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_splat_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.splat.d(<2 x i64>, i32) nounwind
+
+; MIPS32: llvm_mips_splat_d_test:
+; FIXME: This test is currently disabled for MIPS32 because the indices are
+; difficult to match: a 64-bit value cannot be stored in a 32-bit GPR.
+; MIPS64-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_splat_d_ARG1)(
+; MIPS64-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_splat_d_RES)(
+; MIPS64-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS64-DAG: splat.d [[R4:\$w[0-9]+]], [[R3]][$4]
+; MIPS64-DAG: st.d [[R4]], 0([[R2]])
+; MIPS32: .size llvm_mips_splat_d_test
diff --git a/test/CodeGen/Mips/msa/3rf.ll b/test/CodeGen/Mips/msa/3rf.ll
new file mode 100644
index 0000000..ae665af
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf.ll
@@ -0,0 +1,485 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
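+
+; Besides the intrinsics, this file also checks that generic IR floating-point
+; operations on vector types select the same instructions, e.g. (from
+; @fadd_w_test below):
+;   %2 = fadd <4 x float> %0, %1    ; expected to select fadd.w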
+
+@llvm_mips_fadd_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fadd_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fadd_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fadd_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fadd_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fadd.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fadd_w_test
+;
+@llvm_mips_fadd_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fadd_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fadd_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fadd_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fadd_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fadd.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fadd_d_test
+
+define void @fadd_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+ %2 = fadd <4 x float> %0, %1
+ store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
+ ret void
+}
+
+; CHECK: fadd_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fadd.w
+; CHECK: st.w
+; CHECK: .size fadd_w_test
+
+define void @fadd_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+ %2 = fadd <2 x double> %0, %1
+ store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
+ ret void
+}
+
+; CHECK: fadd_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fadd.d
+; CHECK: st.d
+; CHECK: .size fadd_d_test
+;
+@llvm_mips_fdiv_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fdiv_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fdiv_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fdiv_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fdiv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fdiv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fdiv_w_test
+;
+@llvm_mips_fdiv_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fdiv_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fdiv_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fdiv_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fdiv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fdiv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fdiv_d_test
+
+define void @fdiv_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %2 = fdiv <4 x float> %0, %1
+ store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
+ ret void
+}
+
+; CHECK: fdiv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fdiv.w
+; CHECK: st.w
+; CHECK: .size fdiv_w_test
+
+define void @fdiv_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %2 = fdiv <2 x double> %0, %1
+ store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
+ ret void
+}
+
+; CHECK: fdiv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fdiv.d
+; CHECK: st.d
+; CHECK: .size fdiv_d_test
+;
+@llvm_mips_fmin_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmin_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmin_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmin_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmin_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmin_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmin_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmin_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmin.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmin_w_test
+;
+@llvm_mips_fmin_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmin_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmin_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmin_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmin_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmin_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmin_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmin_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmin.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmin_d_test
+;
+@llvm_mips_fmin_a_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmin_a_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmin_a_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmin_a_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmin_a_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmin_a_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmin.a.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmin_a_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmin.a.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmin_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmin_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmin_a_w_test
+;
+@llvm_mips_fmin_a_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmin_a_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmin_a_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmin_a_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmin_a_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmin_a_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmin.a.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmin_a_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmin.a.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmin_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmin_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmin_a_d_test
+;
+@llvm_mips_fmax_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmax_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmax_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmax_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmax_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmax_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmax_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmax_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmax.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmax_w_test
+;
+@llvm_mips_fmax_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmax_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmax_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmax_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmax_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmax_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmax_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmax_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmax.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmax_d_test
+;
+@llvm_mips_fmax_a_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmax_a_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmax_a_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmax_a_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmax_a_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmax_a_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmax_a_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmax_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmax_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmax_a_w_test
+;
+@llvm_mips_fmax_a_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmax_a_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmax_a_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmax_a_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmax_a_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmax_a_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmax_a_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmax_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmax_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmax_a_d_test
+;
+@llvm_mips_fmul_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmul_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmul_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmul_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmul_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmul.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmul_w_test
+;
+@llvm_mips_fmul_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmul_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmul_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmul_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmul_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmul.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmul_d_test
+
+define void @fmul_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2
+ %2 = fmul <4 x float> %0, %1
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES
+ ret void
+}
+
+; CHECK: fmul_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmul.w
+; CHECK: st.w
+; CHECK: .size fmul_w_test
+
+define void @fmul_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2
+ %2 = fmul <2 x double> %0, %1
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES
+ ret void
+}
+
+; CHECK: fmul_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmul.d
+; CHECK: st.d
+; CHECK: .size fmul_d_test
+;
+@llvm_mips_fsub_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsub_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsub_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fsub_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsub_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsub.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsub_w_test
+;
+@llvm_mips_fsub_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsub_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsub_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fsub_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsub_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsub.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsub_d_test
+;
+
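+; fsub_w_test and fsub_d_test repeat the checks with the generic IR fsub
+; instruction instead of the intrinsic, so plain vector subtraction is also
+; shown to select fsub.w and fsub.d.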
+define void @fsub_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2
+ %2 = fsub <4 x float> %0, %1
+ store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES
+ ret void
+}
+
+; CHECK: fsub_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsub.w
+; CHECK: st.w
+; CHECK: .size fsub_w_test
+
+define void @fsub_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2
+ %2 = fsub <2 x double> %0, %1
+ store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES
+ ret void
+}
+
+; CHECK: fsub_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsub.d
+; CHECK: st.d
+; CHECK: .size fsub_d_test
diff --git a/test/CodeGen/Mips/msa/3rf_4rf.ll b/test/CodeGen/Mips/msa/3rf_4rf.ll
new file mode 100644
index 0000000..67ef7fd
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_4rf.ll
@@ -0,0 +1,106 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format
+; and use the destination register as a third operand.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
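+; fmadd.df and fmsub.df are destructive: the destination register doubles as
+; an accumulator, computing roughly wd = wd + (ws * wt) and
+; wd = wd - (ws * wt) per element (per the MSA specification), which is why
+; each test below loads three input vectors.
+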
+@llvm_mips_fmadd_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmadd_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmadd_w_ARG3 = global <4 x float> <float 8.000000e+00, float 9.000000e+00, float 1.000000e+01, float 1.100000e+01>, align 16
+@llvm_mips_fmadd_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmadd_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmadd_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmadd_w_ARG2
+ %2 = load <4 x float>* @llvm_mips_fmadd_w_ARG3
+ %3 = tail call <4 x float> @llvm.mips.fmadd.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* @llvm_mips_fmadd_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmadd.w(<4 x float>, <4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmadd_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmadd.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmadd_w_test
+;
+@llvm_mips_fmadd_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmadd_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmadd_d_ARG3 = global <2 x double> <double 4.000000e+00, double 5.000000e+00>, align 16
+@llvm_mips_fmadd_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmadd_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmadd_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmadd_d_ARG2
+ %2 = load <2 x double>* @llvm_mips_fmadd_d_ARG3
+ %3 = tail call <2 x double> @llvm.mips.fmadd.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* @llvm_mips_fmadd_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmadd.d(<2 x double>, <2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmadd_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmadd.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmadd_d_test
+;
+@llvm_mips_fmsub_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmsub_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmsub_w_ARG3 = global <4 x float> <float 8.000000e+00, float 9.000000e+00, float 1.000000e+01, float 1.100000e+01>, align 16
+@llvm_mips_fmsub_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmsub_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmsub_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmsub_w_ARG2
+ %2 = load <4 x float>* @llvm_mips_fmsub_w_ARG3
+ %3 = tail call <4 x float> @llvm.mips.fmsub.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* @llvm_mips_fmsub_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmsub.w(<4 x float>, <4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmsub_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmsub.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmsub_w_test
+;
+@llvm_mips_fmsub_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmsub_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmsub_d_ARG3 = global <2 x double> <double 4.000000e+00, double 5.000000e+00>, align 16
+@llvm_mips_fmsub_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmsub_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmsub_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmsub_d_ARG2
+ %2 = load <2 x double>* @llvm_mips_fmsub_d_ARG3
+ %3 = tail call <2 x double> @llvm.mips.fmsub.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* @llvm_mips_fmsub_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmsub.d(<2 x double>, <2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmsub_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmsub.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmsub_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_4rf_q.ll b/test/CodeGen/Mips/msa/3rf_4rf_q.ll
new file mode 100644
index 0000000..de28be0
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_4rf_q.ll
@@ -0,0 +1,206 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format,
+; use the destination register as a third operand, and perform fixed-point
+; arithmetic.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
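+; madd_q.df and msub_q.df multiply two Q-format fixed-point vectors (Q15 for
+; .h, Q31 for .w) and add or subtract the product to or from the destination
+; vector; maddr_q.df and msubr_q.df are the rounding variants (per the MSA
+; specification). The destination again doubles as an accumulator, so each
+; test loads three operands.
+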
+@llvm_mips_madd_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_madd_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_madd_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_madd_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_madd_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_madd_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_madd_q_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_madd_q_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_madd_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: madd_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_madd_q_h_test
+;
+@llvm_mips_madd_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_madd_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_madd_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_madd_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_madd_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_madd_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_madd_q_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_madd_q_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_madd_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.madd.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_madd_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: madd_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_madd_q_w_test
+;
+@llvm_mips_maddr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maddr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_maddr_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_maddr_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maddr_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_maddr_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.maddr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_maddr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: maddr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maddr_q_h_test
+;
+@llvm_mips_maddr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maddr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_maddr_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_maddr_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maddr_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_maddr_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.maddr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_maddr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: maddr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maddr_q_w_test
+;
+@llvm_mips_msub_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msub_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msub_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msub_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msub_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_msub_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_msub_q_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_msub_q_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_msub_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.msub.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msub_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msub_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msub_q_h_test
+;
+@llvm_mips_msub_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msub_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msub_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msub_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msub_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_msub_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_msub_q_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_msub_q_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_msub_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.msub.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msub_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msub_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msub_q_w_test
+;
+@llvm_mips_msubr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msubr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msubr_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msubr_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msubr_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_msubr_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.msubr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msubr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msubr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msubr_q_h_test
+;
+@llvm_mips_msubr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msubr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msubr_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msubr_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msubr_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_msubr_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.msubr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msubr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msubr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msubr_q_w_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_exdo.ll b/test/CodeGen/Mips/msa/3rf_exdo.ll
new file mode 100644
index 0000000..8a7f268
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_exdo.ll
@@ -0,0 +1,50 @@
+; Test the MSA floating-point conversion intrinsics that are encoded with the
+; 3RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
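+; fexdo.df converts each element of the two source vectors to the next
+; narrower floating-point format and packs both results into a single
+; destination vector (two <4 x float> into <8 x half> for fexdo.h, two
+; <2 x double> into <4 x float> for fexdo.w), so the CHECK lines expect loads
+; of the wider format but a store of the narrower one.
+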
+@llvm_mips_fexdo_h_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fexdo_h_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fexdo_h_RES = global <8 x half> <half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00>, align 16
+
+define void @llvm_mips_fexdo_h_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fexdo_h_ARG1
+ %1 = load <4 x float>* @llvm_mips_fexdo_h_ARG2
+ %2 = tail call <8 x half> @llvm.mips.fexdo.h(<4 x float> %0, <4 x float> %1)
+ store <8 x half> %2, <8 x half>* @llvm_mips_fexdo_h_RES
+ ret void
+}
+
+declare <8 x half> @llvm.mips.fexdo.h(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fexdo_h_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fexdo.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_fexdo_h_test
+;
+@llvm_mips_fexdo_w_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fexdo_w_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fexdo_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fexdo_w_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fexdo_w_ARG1
+ %1 = load <2 x double>* @llvm_mips_fexdo_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fexdo.w(<2 x double> %0, <2 x double> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fexdo_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fexdo.w(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fexdo_w_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fexdo.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fexdo_w_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_float_int.ll b/test/CodeGen/Mips/msa/3rf_float_int.ll
new file mode 100644
index 0000000..7b01e17
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_float_int.ll
@@ -0,0 +1,50 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format
+; and take an integer vector as an operand.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
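+; fexp2.df scales each floating-point element by a power of two taken from
+; the corresponding element of the integer vector operand, computing roughly
+; wd = ws * 2^wt (per the MSA specification).
+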
+@llvm_mips_fexp2_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fexp2_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_fexp2_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fexp2_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fexp2_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_fexp2_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fexp2.w(<4 x float> %0, <4 x i32> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fexp2_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fexp2.w(<4 x float>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_fexp2_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fexp2.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fexp2_w_test
+;
+@llvm_mips_fexp2_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fexp2_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_fexp2_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fexp2_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fexp2_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_fexp2_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fexp2.d(<2 x double> %0, <2 x i64> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fexp2_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fexp2.d(<2 x double>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_fexp2_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fexp2.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fexp2_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_int_float.ll b/test/CodeGen/Mips/msa/3rf_int_float.ll
new file mode 100644
index 0000000..5624771
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_int_float.ll
@@ -0,0 +1,974 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format
+; and produce an integer vector as a result.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
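+; These are the vector floating-point comparisons: each lane of the result is
+; set to all ones if the predicate holds and all zeros otherwise, so the
+; results are stored as integer vectors. Per the MSA specification the fc*
+; forms are the quiet predicates and the fs* forms the signaling ones; the
+; suffixes name the predicate (af = always false, un = unordered, or =
+; ordered, eq/lt/le/ne = ordered compares, ueq/ult/ule/une = their
+; unordered-or counterparts).
+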
+@llvm_mips_fcaf_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcaf_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcaf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcaf_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcaf_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcaf_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcaf.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcaf_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcaf.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcaf_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcaf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcaf_w_test
+;
+@llvm_mips_fcaf_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcaf_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcaf_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcaf_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcaf_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcaf_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcaf.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcaf_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcaf.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcaf_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcaf.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcaf_d_test
+;
+@llvm_mips_fceq_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fceq_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fceq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fceq_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fceq_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fceq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fceq.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fceq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fceq.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fceq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fceq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fceq_w_test
+;
+@llvm_mips_fceq_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fceq_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fceq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fceq_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fceq_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fceq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fceq.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fceq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fceq.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fceq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fceq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fceq_d_test
+;
+@llvm_mips_fcle_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcle_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcle_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcle_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcle_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcle_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcle.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcle_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcle.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcle_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcle.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcle_w_test
+;
+@llvm_mips_fcle_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcle_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcle_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcle_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcle_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcle_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcle.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcle_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcle.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcle_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcle.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcle_d_test
+;
+@llvm_mips_fclt_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fclt_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fclt_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fclt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fclt_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fclt_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fclt.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fclt_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fclt.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fclt_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fclt.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fclt_w_test
+;
+@llvm_mips_fclt_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fclt_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fclt_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fclt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fclt_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fclt_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fclt.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fclt_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fclt.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fclt_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fclt.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fclt_d_test
+;
+@llvm_mips_fcor_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcor_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcor_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcor_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcor_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcor_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcor.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcor_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcor.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcor_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcor.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcor_w_test
+;
+@llvm_mips_fcor_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcor_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcor_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcor_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcor_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcor_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcor.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcor_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcor.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcor_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcor.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcor_d_test
+;
+@llvm_mips_fcne_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcne_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcne_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcne_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcne_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcne_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcne.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcne_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcne.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcne_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcne.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcne_w_test
+;
+@llvm_mips_fcne_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcne_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcne_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcne_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcne_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcne_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcne.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcne_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcne.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcne_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcne.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcne_d_test
+;
+@llvm_mips_fcueq_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcueq_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcueq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcueq_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcueq_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcueq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcueq.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcueq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcueq.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcueq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcueq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcueq_w_test
+;
+@llvm_mips_fcueq_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcueq_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcueq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcueq_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcueq_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcueq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcueq.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcueq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcueq.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcueq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcueq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcueq_d_test
+;
+@llvm_mips_fcult_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcult_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcult_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcult_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcult_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcult_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcult.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcult_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcult.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcult_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcult.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcult_w_test
+;
+@llvm_mips_fcult_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcult_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcult_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcult_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcult_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcult_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcult.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcult_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcult.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcult_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcult.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcult_d_test
+;
+@llvm_mips_fcule_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcule_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcule_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcule_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcule_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcule_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcule.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcule_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcule.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcule_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcule.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcule_w_test
+;
+@llvm_mips_fcule_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcule_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcule_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcule_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcule_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcule_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcule.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcule_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcule.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcule_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcule.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcule_d_test
+;
+@llvm_mips_fcun_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcun_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcun_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcun_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcun_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcun_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcun.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcun_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcun.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcun_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcun.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcun_w_test
+;
+@llvm_mips_fcun_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcun_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcun_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcun_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcun_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcun_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcun.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcun_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcun.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcun_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcun.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcun_d_test
+;
+@llvm_mips_fcune_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcune_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcune_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcune_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcune_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcune_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcune.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcune_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcune.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcune_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcune.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcune_w_test
+;
+@llvm_mips_fcune_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcune_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcune_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcune_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcune_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcune_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcune.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcune_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcune.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcune_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcune.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcune_d_test
+;
+@llvm_mips_fsaf_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsaf_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsaf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsaf_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsaf_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsaf_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsaf.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsaf_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsaf.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsaf_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsaf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsaf_w_test
+;
+@llvm_mips_fsaf_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsaf_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsaf_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsaf_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsaf_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsaf_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsaf.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsaf_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsaf.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsaf_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsaf.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsaf_d_test
+;
+@llvm_mips_fseq_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fseq_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fseq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fseq_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fseq_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fseq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fseq.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fseq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fseq.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fseq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fseq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fseq_w_test
+;
+@llvm_mips_fseq_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fseq_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fseq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fseq_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fseq_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fseq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fseq.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fseq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fseq.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fseq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fseq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fseq_d_test
+;
+@llvm_mips_fsle_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsle_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsle_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsle_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsle_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsle_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsle.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsle_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsle.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsle_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsle.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsle_w_test
+;
+@llvm_mips_fsle_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsle_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsle_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsle_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsle_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsle_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsle.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsle_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsle.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsle_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsle.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsle_d_test
+;
+@llvm_mips_fslt_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fslt_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fslt_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fslt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fslt_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fslt_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fslt.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fslt_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fslt.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fslt_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fslt.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fslt_w_test
+;
+@llvm_mips_fslt_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fslt_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fslt_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fslt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fslt_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fslt_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fslt.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fslt_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fslt.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fslt_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fslt.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fslt_d_test
+;
+@llvm_mips_fsor_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsor_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsor_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsor_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsor_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsor_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsor.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsor_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsor.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsor_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsor.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsor_w_test
+;
+@llvm_mips_fsor_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsor_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsor_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsor_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsor_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsor_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsor.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsor_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsor.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsor_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsor.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsor_d_test
+;
+@llvm_mips_fsne_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsne_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsne_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsne_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsne_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsne_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsne.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsne_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsne.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsne_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsne.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsne_w_test
+;
+@llvm_mips_fsne_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsne_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsne_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsne_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsne_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsne_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsne.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsne_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsne.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsne_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsne.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsne_d_test
+;
+@llvm_mips_fsueq_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsueq_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsueq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsueq_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsueq_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsueq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsueq.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsueq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsueq.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsueq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsueq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsueq_w_test
+;
+@llvm_mips_fsueq_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsueq_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsueq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsueq_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsueq_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsueq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsueq.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsueq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsueq.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsueq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsueq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsueq_d_test
+;
+@llvm_mips_fsult_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsult_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsult_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsult_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsult_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsult_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsult.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsult_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsult.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsult_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsult.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsult_w_test
+;
+@llvm_mips_fsult_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsult_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsult_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsult_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsult_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsult_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsult.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsult_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsult.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsult_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsult.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsult_d_test
+;
+@llvm_mips_fsule_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsule_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsule_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsule_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsule_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsule_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsule.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsule_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsule.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsule_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsule.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsule_w_test
+;
+@llvm_mips_fsule_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsule_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsule_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsule_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsule_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsule_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsule.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsule_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsule.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsule_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsule.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsule_d_test
+;
+@llvm_mips_fsun_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsun_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsun_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsun_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsun_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsun_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsun.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsun_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsun.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsun_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsun.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsun_w_test
+;
+@llvm_mips_fsun_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsun_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsun_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsun_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsun_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsun_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsun.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsun_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsun.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsun_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsun.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsun_d_test
+;
+@llvm_mips_fsune_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsune_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsune_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsune_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsune_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsune_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsune.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsune_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsune.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsune_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsune.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsune_w_test
+;
+@llvm_mips_fsune_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsune_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsune_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsune_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsune_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsune_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsune.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsune_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsune.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsune_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsune.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsune_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_q.ll b/test/CodeGen/Mips/msa/3rf_q.ll
new file mode 100644
index 0000000..f7000ee
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_q.ll
@@ -0,0 +1,94 @@
+; Test the MSA fixed-point intrinsics that are encoded with the 3RF instruction
+; format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_mul_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mul_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mul_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mul_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mul_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mul_q_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mul.q.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mul_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mul.q.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mul_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mul_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mul_q_h_test
+;
+@llvm_mips_mul_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mul_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mul_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mul_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mul_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mul_q_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mul.q.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mul_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mul.q.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mul_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mul_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mul_q_w_test
+;
+@llvm_mips_mulr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mulr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mulr_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mulr_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mulr.q.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mulr_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mulr.q.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mulr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mulr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mulr_q_h_test
+;
+@llvm_mips_mulr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mulr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mulr_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mulr_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mulr.q.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mulr_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mulr.q.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mulr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mulr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mulr_q_w_test
+;
diff --git a/test/CodeGen/Mips/msa/arithmetic.ll b/test/CodeGen/Mips/msa/arithmetic.ll
new file mode 100644
index 0000000..09ee502
--- /dev/null
+++ b/test/CodeGen/Mips/msa/arithmetic.ll
@@ -0,0 +1,726 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: add_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = add <16 x i8> %1, %2
+ ; CHECK-DAG: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v16i8
+}
+
+define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: add_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = add <8 x i16> %1, %2
+ ; CHECK-DAG: addv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v8i16
+}
+
+define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: add_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = add <4 x i32> %1, %2
+ ; CHECK-DAG: addv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v4i32
+}
+
+define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: add_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = add <2 x i64> %1, %2
+ ; CHECK-DAG: addv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v2i64
+}
+
+define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: add_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: addvi.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v16i8_i
+}
+
+define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: add_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
+ i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: addvi.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v8i16_i
+}
+
+define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: add_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: addvi.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v4i32_i
+}
+
+define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: add_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = add <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: addvi.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v2i64_i
+}
+
+define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: sub_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = sub <16 x i8> %1, %2
+ ; CHECK-DAG: subv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v16i8
+}
+
+define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: sub_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = sub <8 x i16> %1, %2
+ ; CHECK-DAG: subv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v8i16
+}
+
+define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: sub_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = sub <4 x i32> %1, %2
+ ; CHECK-DAG: subv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v4i32
+}
+
+define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: sub_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = sub <2 x i64> %1, %2
+ ; CHECK-DAG: subv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v2i64
+}
+
+define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: sub_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: subvi.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v16i8_i
+}
+
+define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: sub_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
+ i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: subvi.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v8i16_i
+}
+
+define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: sub_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: subvi.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v4i32_i
+}
+
+define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: sub_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = sub <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: subvi.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v2i64_i
+}
+
+define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: mul_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = mul <16 x i8> %1, %2
+ ; CHECK-DAG: mulv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v16i8
+}
+
+define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: mul_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = mul <8 x i16> %1, %2
+ ; CHECK-DAG: mulv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v8i16
+}
+
+define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: mul_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = mul <4 x i32> %1, %2
+ ; CHECK-DAG: mulv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v4i32
+}
+
+define void @mul_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: mul_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = mul <2 x i64> %1, %2
+ ; CHECK-DAG: mulv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v2i64
+}
+
+define void @maddv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: maddv_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %c
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <16 x i8> %2, %3
+ %5 = add <16 x i8> %4, %1
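+  ; The mul/add pair is matched to maddv, which accumulates ws * wt into wd.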
+ ; CHECK-DAG: maddv.b [[R1]], [[R2]], [[R3]]
+ store <16 x i8> %5, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size maddv_v16i8
+}
+
+define void @maddv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: maddv_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <8 x i16>* %c
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <8 x i16> %2, %3
+ %5 = add <8 x i16> %4, %1
+ ; CHECK-DAG: maddv.h [[R1]], [[R2]], [[R3]]
+ store <8 x i16> %5, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size maddv_v8i16
+}
+
+define void @maddv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: maddv_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x i32>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <4 x i32> %2, %3
+ %5 = add <4 x i32> %4, %1
+ ; CHECK-DAG: maddv.w [[R1]], [[R2]], [[R3]]
+ store <4 x i32> %5, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size maddv_v4i32
+}
+
+define void @maddv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: maddv_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x i64>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <2 x i64> %2, %3
+ %5 = add <2 x i64> %4, %1
+ ; CHECK-DAG: maddv.d [[R1]], [[R2]], [[R3]]
+ store <2 x i64> %5, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size maddv_v2i64
+}
+
+define void @msubv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: msubv_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %c
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <16 x i8> %2, %3
+ %5 = sub <16 x i8> %1, %4
+ ; CHECK-DAG: msubv.b [[R1]], [[R2]], [[R3]]
+ store <16 x i8> %5, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size msubv_v16i8
+}
+
+define void @msubv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: msubv_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <8 x i16>* %c
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <8 x i16> %2, %3
+ %5 = sub <8 x i16> %1, %4
+ ; CHECK-DAG: msubv.h [[R1]], [[R2]], [[R3]]
+ store <8 x i16> %5, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size msubv_v8i16
+}
+
+define void @msubv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: msubv_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x i32>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <4 x i32> %2, %3
+ %5 = sub <4 x i32> %1, %4
+ ; CHECK-DAG: msubv.w [[R1]], [[R2]], [[R3]]
+ store <4 x i32> %5, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size msubv_v4i32
+}
+
+define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: msubv_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x i64>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <2 x i64> %2, %3
+ %5 = sub <2 x i64> %1, %4
+ ; CHECK-DAG: msubv.d [[R1]], [[R2]], [[R3]]
+ store <2 x i64> %5, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size msubv_v2i64
+}
+
+define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: div_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = sdiv <16 x i8> %1, %2
+ ; CHECK-DAG: div_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_s_v16i8
+}
+
+define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: div_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = sdiv <8 x i16> %1, %2
+ ; CHECK-DAG: div_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_s_v8i16
+}
+
+define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: div_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = sdiv <4 x i32> %1, %2
+ ; CHECK-DAG: div_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_s_v4i32
+}
+
+define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: div_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = sdiv <2 x i64> %1, %2
+ ; CHECK-DAG: div_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_s_v2i64
+}
+
+define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: div_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = udiv <16 x i8> %1, %2
+ ; CHECK-DAG: div_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_u_v16i8
+}
+
+define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: div_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = udiv <8 x i16> %1, %2
+ ; CHECK-DAG: div_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_u_v8i16
+}
+
+define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: div_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = udiv <4 x i32> %1, %2
+ ; CHECK-DAG: div_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_u_v4i32
+}
+
+define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: div_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = udiv <2 x i64> %1, %2
+ ; CHECK-DAG: div_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_u_v2i64
+}
+
+define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: mod_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = srem <16 x i8> %1, %2
+ ; CHECK-DAG: mod_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_s_v16i8
+}
+
+define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: mod_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = srem <8 x i16> %1, %2
+ ; CHECK-DAG: mod_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_s_v8i16
+}
+
+define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: mod_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = srem <4 x i32> %1, %2
+ ; CHECK-DAG: mod_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_s_v4i32
+}
+
+define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: mod_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = srem <2 x i64> %1, %2
+ ; CHECK-DAG: mod_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_s_v2i64
+}
+
+define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: mod_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = urem <16 x i8> %1, %2
+ ; CHECK-DAG: mod_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_u_v16i8
+}
+
+define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: mod_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = urem <8 x i16> %1, %2
+ ; CHECK-DAG: mod_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_u_v8i16
+}
+
+define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: mod_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = urem <4 x i32> %1, %2
+ ; CHECK-DAG: mod_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_u_v4i32
+}
+
+define void @mod_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: mod_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = urem <2 x i64> %1, %2
+ ; CHECK-DAG: mod_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_u_v2i64
+}
diff --git a/test/CodeGen/Mips/msa/arithmetic_float.ll b/test/CodeGen/Mips/msa/arithmetic_float.ll
new file mode 100644
index 0000000..dc38721
--- /dev/null
+++ b/test/CodeGen/Mips/msa/arithmetic_float.ll
@@ -0,0 +1,456 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: add_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fadd <4 x float> %1, %2
+ ; CHECK-DAG: fadd.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v4f32
+}
+
+define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: add_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fadd <2 x double> %1, %2
+ ; CHECK-DAG: fadd.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v2f64
+}
+
+define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: sub_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fsub <4 x float> %1, %2
+ ; CHECK-DAG: fsub.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v4f32
+}
+
+define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: sub_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fsub <2 x double> %1, %2
+ ; CHECK-DAG: fsub.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v2f64
+}
+
+define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: mul_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fmul <4 x float> %1, %2
+ ; CHECK-DAG: fmul.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v4f32
+}
+
+define void @mul_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: mul_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fmul <2 x double> %1, %2
+ ; CHECK-DAG: fmul.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v2f64
+}
+
+define void @fma_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
+ <4 x float>* %c) nounwind {
+ ; CHECK: fma_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x float>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = tail call <4 x float> @llvm.fma.v4f32 (<4 x float> %1, <4 x float> %2,
+ <4 x float> %3)
+ ; CHECK-DAG: fmadd.w [[R1]], [[R2]], [[R3]]
+ store <4 x float> %4, <4 x float>* %d
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size fma_v4f32
+}
+
+define void @fma_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
+ <2 x double>* %c) nounwind {
+ ; CHECK: fma_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x double>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = tail call <2 x double> @llvm.fma.v2f64 (<2 x double> %1, <2 x double> %2,
+ <2 x double> %3)
+ ; CHECK-DAG: fmadd.d [[R1]], [[R2]], [[R3]]
+ store <2 x double> %4, <2 x double>* %d
+ ; CHECK-DAG: st.d [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size fma_v2f64
+}
+
+define void @fmsub_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
+ <4 x float>* %c) nounwind {
+ ; CHECK: fmsub_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x float>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = fmul <4 x float> %2, %3
+ %5 = fsub <4 x float> %1, %4
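+  ; The fmul/fsub pair is matched to fmsub, which computes wd - ws * wt.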
+ ; CHECK-DAG: fmsub.w [[R1]], [[R2]], [[R3]]
+ store <4 x float> %5, <4 x float>* %d
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size fmsub_v4f32
+}
+
+define void @fmsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
+ <2 x double>* %c) nounwind {
+ ; CHECK: fmsub_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x double>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = fmul <2 x double> %2, %3
+ %5 = fsub <2 x double> %1, %4
+ ; CHECK-DAG: fmsub.d [[R1]], [[R2]], [[R3]]
+ store <2 x double> %5, <2 x double>* %d
+ ; CHECK-DAG: st.d [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size fmsub_v2f64
+}
+
+define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: fdiv_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fdiv <4 x float> %1, %2
+ ; CHECK-DAG: fdiv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fdiv_v4f32
+}
+
+define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: fdiv_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fdiv <2 x double> %1, %2
+ ; CHECK-DAG: fdiv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fdiv_v2f64
+}
+
+define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: fabs_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x float> @llvm.fabs.v4f32 (<4 x float> %1)
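+  ; fabs has no dedicated MSA instruction; fmax_a takes the maximum of the
+  ; absolute values of its operands, so fmax_a(x, x) yields |x|.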
+ ; CHECK-DAG: fmax_a.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fabs_v4f32
+}
+
+define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: fabs_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x double> @llvm.fabs.v2f64 (<2 x double> %1)
+ ; CHECK-DAG: fmax_a.d [[R3:\$w[0-9]+]], [[R1]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fabs_v2f64
+}
+
+define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: fexp2_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
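+  ; exp2 is expanded using fexp2, which scales its first operand by two to the
+  ; power of its second, so exp2(x) becomes fexp2(1.0, x). The ldi.w/ffint_u.w
+  ; pair materializes the splatted 1.0.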
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: ffint_u.w [[R4:\$w[0-9]+]], [[R3]]
+ ; CHECK-DAG: fexp2.w [[R4:\$w[0-9]+]], [[R3]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size fexp2_v4f32
+}
+
+define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: fexp2_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: ffint_u.d [[R4:\$w[0-9]+]], [[R3]]
+ ; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size fexp2_v2f64
+}
+
+define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: fexp2_v4f32_2:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
+ %3 = fmul <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>, %2
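+  ; The multiplication by 2.0 is folded into the fexp2 scale operand.
+  ; lui 16384 is 0x4000, so fill.w splats 0x40000000, the encoding of 2.0f.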
+ ; CHECK-DAG: lui [[R3:\$[0-9]+]], 16384
+ ; CHECK-DAG: fill.w [[R4:\$w[0-9]+]], [[R3]]
+ ; CHECK-DAG: fexp2.w [[R5:\$w[0-9]+]], [[R4]], [[R1]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R5]], 0($4)
+
+ ret void
+ ; CHECK: .size fexp2_v4f32_2
+}
+
+define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
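+  ; The splatted 2.0 is emitted as a constant-pool entry ahead of the function
+  ; body; 4611686018427387904 is 0x4000000000000000, the bit pattern of
+  ; double 2.0.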
+ ; CHECK: .8byte 4611686018427387904
+ ; CHECK-NEXT: .8byte 4611686018427387904
+ ; CHECK: fexp2_v2f64_2:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
+ %3 = fmul <2 x double> <double 2.0, double 2.0>, %2
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo(
+ ; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size fexp2_v2f64_2
+}
+
+define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: fsqrt_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %1)
+ ; CHECK-DAG: fsqrt.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fsqrt_v4f32
+}
+
+define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: fsqrt_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %1)
+ ; CHECK-DAG: fsqrt.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fsqrt_v2f64
+}
+
+define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ffint_u_v4f32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = uitofp <4 x i32> %1 to <4 x float>
+ ; CHECK-DAG: ffint_u.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ffint_u_v4f32
+}
+
+define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ffint_u_v2f64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = uitofp <2 x i64> %1 to <2 x double>
+ ; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ffint_u_v2f64
+}
+
+define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ffint_s_v4f32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = sitofp <4 x i32> %1 to <4 x float>
+ ; CHECK-DAG: ffint_s.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ffint_s_v4f32
+}
+
+define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ffint_s_v2f64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = sitofp <2 x i64> %1 to <2 x double>
+ ; CHECK-DAG: ffint_s.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ffint_s_v2f64
+}
+
+define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: ftrunc_u_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = fptoui <4 x float> %1 to <4 x i32>
+ ; CHECK-DAG: ftrunc_u.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ftrunc_u_v4f32
+}
+
+define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: ftrunc_u_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = fptoui <2 x double> %1 to <2 x i64>
+ ; CHECK-DAG: ftrunc_u.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ftrunc_u_v2f64
+}
+
+define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: ftrunc_s_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = fptosi <4 x float> %1 to <4 x i32>
+ ; CHECK-DAG: ftrunc_s.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ftrunc_s_v4f32
+}
+
+define void @ftrunc_s_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: ftrunc_s_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = fptosi <2 x double> %1 to <2 x i64>
+ ; CHECK-DAG: ftrunc_s.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ftrunc_s_v2f64
+}
+
+declare <4 x float> @llvm.fabs.v4f32(<4 x float> %Val)
+declare <2 x double> @llvm.fabs.v2f64(<2 x double> %Val)
+declare <4 x float> @llvm.exp2.v4f32(<4 x float> %val)
+declare <2 x double> @llvm.exp2.v2f64(<2 x double> %val)
+declare <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b,
+ <4 x float> %c)
+declare <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b,
+ <2 x double> %c)
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float> %Val)
+declare <2 x double> @llvm.sqrt.v2f64(<2 x double> %Val)
diff --git a/test/CodeGen/Mips/msa/basic_operations.ll b/test/CodeGen/Mips/msa/basic_operations.ll
new file mode 100644
index 0000000..0169a07
--- /dev/null
+++ b/test/CodeGen/Mips/msa/basic_operations.ll
@@ -0,0 +1,481 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32-AE -check-prefix=MIPS32-BE %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32-AE -check-prefix=MIPS32-LE %s
+
+@v4i8 = global <4 x i8> <i8 0, i8 0, i8 0, i8 0>
+@v16i8 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+@v8i16 = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
+@v4i32 = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+@v2i64 = global <2 x i64> <i64 0, i64 0>
+@i64 = global i64 0
+
+define void @const_v16i8() nounwind {
+ ; MIPS32-AE: const_v16i8:
+
+ store volatile <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1
+
+ store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, <16 x i8>*@v16i8
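+  ; An alternating <1, 0> byte pattern is a halfword splat: 0x0100 (256) when
+  ; big-endian, 0x0001 (1) when little-endian.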
+ ; MIPS32-BE: ldi.h [[R1:\$w[0-9]+]], 256
+ ; MIPS32-LE: ldi.h [[R1:\$w[0-9]+]], 1
+
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>, <16 x i8>*@v16i8
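+  ; The repeating <1, 2, 3, 4> bytes form the word 0x01020304 big-endian or
+  ; 0x04030201 little-endian; lui/ori build its upper and lower halves before
+  ; fill.w splats it.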
+ ; MIPS32-BE-DAG: lui [[R2:\$[0-9]+]], 258
+ ; MIPS32-LE-DAG: lui [[R2:\$[0-9]+]], 1027
+ ; MIPS32-BE-DAG: ori [[R2]], [[R2]], 772
+ ; MIPS32-LE-DAG: ori [[R2]], [[R2]], 513
+ ; MIPS32-AE-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]]
+
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32-AE: .size const_v16i8
+}
+
+define void @const_v8i16() nounwind {
+ ; MIPS32-AE: const_v8i16:
+
+ store volatile <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16>*@v8i16
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16>*@v8i16
+ ; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1
+
+ store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, <8 x i16>*@v8i16
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <8 x i16> <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, <8 x i16>*@v8i16
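+  ; 1028 is 0x0404, so the halfword splat narrows to a byte splat of 4.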
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 4
+
+ store volatile <8 x i16> <i16 1, i16 2, i16 1, i16 2, i16 1, i16 2, i16 1, i16 2>, <8 x i16>*@v8i16
+ ; MIPS32-BE-DAG: lui [[R2:\$[0-9]+]], 1
+ ; MIPS32-LE-DAG: lui [[R2:\$[0-9]+]], 2
+ ; MIPS32-BE-DAG: ori [[R2]], [[R2]], 2
+ ; MIPS32-LE-DAG: ori [[R2]], [[R2]], 1
+ ; MIPS32-AE-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]]
+
+ store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, <8 x i16>*@v8i16
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32-AE: .size const_v8i16
+}
+
+define void @const_v4i32() nounwind {
+ ; MIPS32-AE: const_v4i32:
+
+ store volatile <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ldi.w [[R1:\$w[0-9]+]], 1
+
+ store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 31>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>, <4 x i32>*@v4i32
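+  ; 16843009 is 0x01010101, so the word splat narrows to a byte splat of 1.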
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1
+
+ store volatile <4 x i32> <i32 65537, i32 65537, i32 65537, i32 65537>, <4 x i32>*@v4i32
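+  ; 65537 is 0x00010001, so the word splat narrows to a halfword splat of 1.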
+ ; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1
+
+ store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32-AE: .size const_v4i32
+}
+
+define void @const_v2i64() nounwind {
+ ; MIPS32-AE: const_v2i64:
+
+ store volatile <2 x i64> <i64 0, i64 0>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <2 x i64> <i64 72340172838076673, i64 72340172838076673>, <2 x i64>*@v2i64
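+  ; The next three constants are 0x0101010101010101, 0x0001000100010001 and
+  ; 0x0000000100000001, so they narrow to byte, halfword and word splats of 1.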
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1
+
+ store volatile <2 x i64> <i64 281479271743489, i64 281479271743489>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1
+
+ store volatile <2 x i64> <i64 4294967297, i64 4294967297>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.w [[R1:\$w[0-9]+]], 1
+
+ store volatile <2 x i64> <i64 1, i64 1>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.d [[R1:\$w[0-9]+]], 1
+
+ store volatile <2 x i64> <i64 1, i64 31>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x i64> <i64 3, i64 4>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32-AE: .size const_v2i64
+}
+
+define void @nonconst_v16i8(i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %f, i8 %g, i8 %h) nounwind {
+ ; MIPS32-AE: nonconst_v16i8:
+
+ %1 = insertelement <16 x i8> undef, i8 %a, i32 0
+ %2 = insertelement <16 x i8> %1, i8 %b, i32 1
+ %3 = insertelement <16 x i8> %2, i8 %c, i32 2
+ %4 = insertelement <16 x i8> %3, i8 %d, i32 3
+ %5 = insertelement <16 x i8> %4, i8 %e, i32 4
+ %6 = insertelement <16 x i8> %5, i8 %f, i32 5
+ %7 = insertelement <16 x i8> %6, i8 %g, i32 6
+ %8 = insertelement <16 x i8> %7, i8 %h, i32 7
+ %9 = insertelement <16 x i8> %8, i8 %h, i32 8
+ %10 = insertelement <16 x i8> %9, i8 %h, i32 9
+ %11 = insertelement <16 x i8> %10, i8 %h, i32 10
+ %12 = insertelement <16 x i8> %11, i8 %h, i32 11
+ %13 = insertelement <16 x i8> %12, i8 %h, i32 12
+ %14 = insertelement <16 x i8> %13, i8 %h, i32 13
+ %15 = insertelement <16 x i8> %14, i8 %h, i32 14
+ %16 = insertelement <16 x i8> %15, i8 %h, i32 15
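+  ; The first four i8 arguments arrive in $4-$7; the rest are read from the
+  ; stack. Big-endian picks the byte at offset 3 within each 4-byte argument
+  ; slot, little-endian the byte at offset 0, hence the differing lbu offsets.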
+ ; MIPS32-AE-DAG: insert.b [[R1:\$w[0-9]+]][0], $4
+ ; MIPS32-AE-DAG: insert.b [[R1]][1], $5
+ ; MIPS32-AE-DAG: insert.b [[R1]][2], $6
+ ; MIPS32-AE-DAG: insert.b [[R1]][3], $7
+ ; MIPS32-BE-DAG: lbu [[R2:\$[0-9]+]], 19($sp)
+ ; MIPS32-LE-DAG: lbu [[R2:\$[0-9]+]], 16($sp)
+ ; MIPS32-AE-DAG: insert.b [[R1]][4], [[R2]]
+ ; MIPS32-BE-DAG: lbu [[R3:\$[0-9]+]], 23($sp)
+ ; MIPS32-LE-DAG: lbu [[R3:\$[0-9]+]], 20($sp)
+ ; MIPS32-AE-DAG: insert.b [[R1]][5], [[R3]]
+ ; MIPS32-BE-DAG: lbu [[R4:\$[0-9]+]], 27($sp)
+ ; MIPS32-LE-DAG: lbu [[R4:\$[0-9]+]], 24($sp)
+ ; MIPS32-AE-DAG: insert.b [[R1]][6], [[R4]]
+ ; MIPS32-BE-DAG: lbu [[R5:\$[0-9]+]], 31($sp)
+ ; MIPS32-LE-DAG: lbu [[R5:\$[0-9]+]], 28($sp)
+ ; MIPS32-AE-DAG: insert.b [[R1]][7], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][8], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][9], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][10], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][11], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][12], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][13], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][14], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][15], [[R5]]
+
+ store volatile <16 x i8> %16, <16 x i8>*@v16i8
+
+ ret void
+ ; MIPS32-AE: .size nonconst_v16i8
+}
+
+define void @nonconst_v8i16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h) nounwind {
+ ; MIPS32-AE: nonconst_v8i16:
+
+ %1 = insertelement <8 x i16> undef, i16 %a, i32 0
+ %2 = insertelement <8 x i16> %1, i16 %b, i32 1
+ %3 = insertelement <8 x i16> %2, i16 %c, i32 2
+ %4 = insertelement <8 x i16> %3, i16 %d, i32 3
+ %5 = insertelement <8 x i16> %4, i16 %e, i32 4
+ %6 = insertelement <8 x i16> %5, i16 %f, i32 5
+ %7 = insertelement <8 x i16> %6, i16 %g, i32 6
+ %8 = insertelement <8 x i16> %7, i16 %h, i32 7
+ ; MIPS32-AE-DAG: insert.h [[R1:\$w[0-9]+]][0], $4
+ ; MIPS32-AE-DAG: insert.h [[R1]][1], $5
+ ; MIPS32-AE-DAG: insert.h [[R1]][2], $6
+ ; MIPS32-AE-DAG: insert.h [[R1]][3], $7
+ ; MIPS32-BE-DAG: lhu [[R2:\$[0-9]+]], 18($sp)
+ ; MIPS32-LE-DAG: lhu [[R2:\$[0-9]+]], 16($sp)
+ ; MIPS32-AE-DAG: insert.h [[R1]][4], [[R2]]
+ ; MIPS32-BE-DAG: lhu [[R2:\$[0-9]+]], 22($sp)
+ ; MIPS32-LE-DAG: lhu [[R2:\$[0-9]+]], 20($sp)
+ ; MIPS32-AE-DAG: insert.h [[R1]][5], [[R2]]
+ ; MIPS32-BE-DAG: lhu [[R2:\$[0-9]+]], 26($sp)
+ ; MIPS32-LE-DAG: lhu [[R2:\$[0-9]+]], 24($sp)
+ ; MIPS32-AE-DAG: insert.h [[R1]][6], [[R2]]
+ ; MIPS32-BE-DAG: lhu [[R2:\$[0-9]+]], 30($sp)
+ ; MIPS32-LE-DAG: lhu [[R2:\$[0-9]+]], 28($sp)
+ ; MIPS32-AE-DAG: insert.h [[R1]][7], [[R2]]
+
+ store volatile <8 x i16> %8, <8 x i16>*@v8i16
+
+ ret void
+ ; MIPS32-AE: .size nonconst_v8i16
+}
+
+define void @nonconst_v4i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+ ; MIPS32-AE: nonconst_v4i32:
+
+ %1 = insertelement <4 x i32> undef, i32 %a, i32 0
+ %2 = insertelement <4 x i32> %1, i32 %b, i32 1
+ %3 = insertelement <4 x i32> %2, i32 %c, i32 2
+ %4 = insertelement <4 x i32> %3, i32 %d, i32 3
+ ; MIPS32-AE: insert.w [[R1:\$w[0-9]+]][0], $4
+ ; MIPS32-AE: insert.w [[R1]][1], $5
+ ; MIPS32-AE: insert.w [[R1]][2], $6
+ ; MIPS32-AE: insert.w [[R1]][3], $7
+
+ store volatile <4 x i32> %4, <4 x i32>*@v4i32
+
+ ret void
+ ; MIPS32-AE: .size nonconst_v4i32
+}
+
+define void @nonconst_v2i64(i64 %a, i64 %b) nounwind {
+ ; MIPS32-AE: nonconst_v2i64:
+
+ %1 = insertelement <2 x i64> undef, i64 %a, i32 0
+ %2 = insertelement <2 x i64> %1, i64 %b, i32 1
+ ; MIPS32-AE: insert.w [[R1:\$w[0-9]+]][0], $4
+ ; MIPS32-AE: insert.w [[R1]][1], $5
+ ; MIPS32-AE: insert.w [[R1]][2], $6
+ ; MIPS32-AE: insert.w [[R1]][3], $7
+
+ store volatile <2 x i64> %2, <2 x i64>*@v2i64
+
+ ret void
+ ; MIPS32-AE: .size nonconst_v2i64
+}
+
+define i32 @extract_sext_v16i8() nounwind {
+ ; MIPS32-AE: extract_sext_v16i8:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
+
+ %2 = add <16 x i8> %1, %1
+ ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <16 x i8> %2, i32 1
+ %4 = sext i8 %3 to i32
+ ; MIPS32-AE-DAG: copy_s.b [[R3:\$[0-9]+]], [[R1]][1]
+ ; MIPS32-AE-NOT: sll
+ ; MIPS32-AE-NOT: sra
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_sext_v16i8
+}
+
+define i32 @extract_sext_v8i16() nounwind {
+ ; MIPS32-AE: extract_sext_v8i16:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
+
+ %2 = add <8 x i16> %1, %1
+ ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <8 x i16> %2, i32 1
+ %4 = sext i16 %3 to i32
+ ; MIPS32-AE-DAG: copy_s.h [[R3:\$[0-9]+]], [[R1]][1]
+ ; MIPS32-AE-NOT: sll
+ ; MIPS32-AE-NOT: sra
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_sext_v8i16
+}
+
+define i32 @extract_sext_v4i32() nounwind {
+ ; MIPS32-AE: extract_sext_v4i32:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = add <4 x i32> %1, %1
+ ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <4 x i32> %2, i32 1
+ ; MIPS32-AE-DAG: copy_s.w [[R3:\$[0-9]+]], [[R1]][1]
+
+ ret i32 %3
+ ; MIPS32-AE: .size extract_sext_v4i32
+}
+
+define i64 @extract_sext_v2i64() nounwind {
+ ; MIPS32-AE: extract_sext_v2i64:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = add <2 x i64> %1, %1
+ ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <2 x i64> %2, i32 1
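+  ; MIPS32 has no 64-bit GPRs, so the i64 element is returned as the two
+  ; 32-bit subelements at w-lane indices 2 and 3.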
+ ; MIPS32-AE-DAG: copy_s.w [[R3:\$[0-9]+]], [[R1]][2]
+ ; MIPS32-AE-DAG: copy_s.w [[R4:\$[0-9]+]], [[R1]][3]
+ ; MIPS32-AE-NOT: sll
+ ; MIPS32-AE-NOT: sra
+
+ ret i64 %3
+ ; MIPS32-AE: .size extract_sext_v2i64
+}
+
+define i32 @extract_zext_v16i8() nounwind {
+ ; MIPS32-AE: extract_zext_v16i8:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
+
+ %2 = add <16 x i8> %1, %1
+ ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <16 x i8> %2, i32 1
+ %4 = zext i8 %3 to i32
+ ; MIPS32-AE-DAG: copy_u.b [[R3:\$[0-9]+]], [[R1]][1]
+ ; MIPS32-AE-NOT: andi
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_zext_v16i8
+}
+
+define i32 @extract_zext_v8i16() nounwind {
+ ; MIPS32-AE: extract_zext_v8i16:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
+
+ %2 = add <8 x i16> %1, %1
+ ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <8 x i16> %2, i32 1
+ %4 = zext i16 %3 to i32
+ ; MIPS32-AE-DAG: copy_u.h [[R3:\$[0-9]+]], [[R1]][1]
+ ; MIPS32-AE-NOT: andi
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_zext_v8i16
+}
+
+define i32 @extract_zext_v4i32() nounwind {
+ ; MIPS32-AE: extract_zext_v4i32:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = add <4 x i32> %1, %1
+ ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <4 x i32> %2, i32 1
+ ; MIPS32-AE-DAG: copy_{{[su]}}.w [[R3:\$[0-9]+]], [[R1]][1]
+
+ ret i32 %3
+ ; MIPS32-AE: .size extract_zext_v4i32
+}
+
+define i64 @extract_zext_v2i64() nounwind {
+ ; MIPS32-AE: extract_zext_v2i64:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = add <2 x i64> %1, %1
+ ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <2 x i64> %2, i32 1
+ ; MIPS32-AE-DAG: copy_{{[su]}}.w [[R3:\$[0-9]+]], [[R1]][2]
+ ; MIPS32-AE-DAG: copy_{{[su]}}.w [[R4:\$[0-9]+]], [[R1]][3]
+ ; MIPS32-AE-NOT: andi
+
+ ret i64 %3
+ ; MIPS32-AE: .size extract_zext_v2i64
+}
+
+define void @insert_v16i8(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v16i8:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
+
+ %a2 = trunc i32 %a to i8
+ %a3 = sext i8 %a2 to i32
+ %a4 = trunc i32 %a3 to i8
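+  ; The trunc/sext/trunc round-trip leaves the low 8 bits unchanged, so no
+  ; explicit sign-extension code should be emitted.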
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %2 = insertelement <16 x i8> %1, i8 %a4, i32 1
+ ; MIPS32-AE-DAG: insert.b [[R1]][1], $4
+
+ store <16 x i8> %2, <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: st.b [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v16i8
+}
+
+define void @insert_v8i16(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v8i16:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
+
+ %a2 = trunc i32 %a to i16
+ %a3 = sext i16 %a2 to i32
+ %a4 = trunc i32 %a3 to i16
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %2 = insertelement <8 x i16> %1, i16 %a4, i32 1
+ ; MIPS32-AE-DAG: insert.h [[R1]][1], $4
+
+ store <8 x i16> %2, <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: st.h [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v8i16
+}
+
+define void @insert_v4i32(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v4i32:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %2 = insertelement <4 x i32> %1, i32 %a, i32 1
+ ; MIPS32-AE-DAG: insert.w [[R1]][1], $4
+
+ store <4 x i32> %2, <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v4i32
+}
+
+define void @insert_v2i64(i64 %a) nounwind {
+ ; MIPS32-AE: insert_v2i64:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %2 = insertelement <2 x i64> %1, i64 %a, i32 1
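+  ; The i64 value arrives split across $4/$5 on MIPS32, so it is inserted as
+  ; two 32-bit elements at w-lane indices 2 and 3.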
+ ; MIPS32-AE-DAG: insert.w [[R1]][2], $4
+ ; MIPS32-AE-DAG: insert.w [[R1]][3], $5
+
+ store <2 x i64> %2, <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v2i64
+}
+
+define void @truncstore() nounwind {
+ ; MIPS32-AE: truncstore:
+
+ store volatile <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8>*@v4i8
+ ; TODO: What code should be emitted?
+
+ ret void
+ ; MIPS32-AE: .size truncstore
+}
diff --git a/test/CodeGen/Mips/msa/basic_operations_float.ll b/test/CodeGen/Mips/msa/basic_operations_float.ll
new file mode 100644
index 0000000..1f53810
--- /dev/null
+++ b/test/CodeGen/Mips/msa/basic_operations_float.ll
@@ -0,0 +1,207 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32 %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32 %s
+
+@v4f32 = global <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>
+@v2f64 = global <2 x double> <double 0.0, double 0.0>
+@f32 = global float 0.0
+@f64 = global double 0.0
+
+define void @const_v4f32() nounwind {
+ ; MIPS32: const_v4f32:
+
+ store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>*@v4f32
+ ; MIPS32: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, <4 x float>*@v4f32
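+  ; lui 16256 is 0x3F80, so fill.w splats 0x3F800000, the encoding of 1.0f.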
+ ; MIPS32: lui [[R1:\$[0-9]+]], 16256
+ ; MIPS32: fill.w [[R2:\$w[0-9]+]], [[R1]]
+
+ store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, <4 x float>*@v4f32
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, <4 x float>*@v4f32
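+  ; 65537.0f is encoded as 0x47800080; lui/ori build it from 18304 (0x4780)
+  ; and 128 (0x80) before fill.w splats it.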
+ ; MIPS32: lui [[R1:\$[0-9]+]], 18304
+ ; MIPS32: ori [[R2:\$[0-9]+]], [[R1]], 128
+ ; MIPS32: fill.w [[R3:\$w[0-9]+]], [[R2]]
+
+ store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, <4 x float>*@v4f32
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, <4 x float>*@v4f32
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32: .size const_v4f32
+}
+
+define void @const_v2f64() nounwind {
+ ; MIPS32: const_v2f64:
+
+ store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
+ ; MIPS32: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32: .size const_v2f64
+}
+
+define void @nonconst_v4f32() nounwind {
+ ; MIPS32: nonconst_v4f32:
+
+  %1 = load float* @f32
+ %2 = insertelement <4 x float> undef, float %1, i32 0
+ %3 = insertelement <4 x float> %2, float %1, i32 1
+ %4 = insertelement <4 x float> %3, float %1, i32 2
+ %5 = insertelement <4 x float> %4, float %1, i32 3
+ store volatile <4 x float> %5, <4 x float>*@v4f32
+ ; MIPS32: lwc1 $f[[R1:[0-9]+]], 0(
+ ; MIPS32: splati.w [[R2:\$w[0-9]+]], $w[[R1]]
+
+ ret void
+ ; MIPS32: .size nonconst_v4f32
+}
+
+define void @nonconst_v2f64() nounwind {
+ ; MIPS32: nonconst_v2f64:
+
+ %1 = load double* @f64
+ %2 = insertelement <2 x double> undef, double %1, i32 0
+ %3 = insertelement <2 x double> %2, double %1, i32 1
+ store volatile <2 x double> %3, <2 x double>*@v2f64
+ ; MIPS32: ldc1 $f[[R1:[0-9]+]], 0(
+ ; MIPS32: splati.d [[R2:\$w[0-9]+]], $w[[R1]]
+
+ ret void
+ ; MIPS32: .size nonconst_v2f64
+}
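+; In the two nonconst tests above, the same register number is expected for
+; $f[[R1]] and $w[[R1]]: the FPU registers alias the low bits of the MSA
+; registers, so the loaded scalar can be splatted without an extra copy.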
+
+define float @extract_v4f32() nounwind {
+ ; MIPS32: extract_v4f32:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = fadd <4 x float> %1, %1
+ ; MIPS32-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <4 x float> %2, i32 1
+ ; Element 1 can be obtained by splatting it across the vector and extracting
+ ; $w0:sub_lo
+ ; MIPS32-DAG: splati.w $w0, [[R1]][1]
+
+ ret float %3
+ ; MIPS32: .size extract_v4f32
+}
+
+define float @extract_v4f32_elt0() nounwind {
+ ; MIPS32: extract_v4f32_elt0:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = fadd <4 x float> %1, %1
+ ; MIPS32-DAG: fadd.w $w0, [[R1]], [[R1]]
+
+ %3 = extractelement <4 x float> %2, i32 0
+ ; Element 0 can be obtained by extracting $w0:sub_lo ($f0)
+ ; MIPS32-NOT: copy_u.w
+ ; MIPS32-NOT: mtc1
+
+ ret float %3
+ ; MIPS32: .size extract_v4f32_elt0
+}
+
+define double @extract_v2f64() nounwind {
+ ; MIPS32: extract_v2f64:
+
+ %1 = load <2 x double>* @v2f64
+ ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = fadd <2 x double> %1, %1
+ ; MIPS32-DAG: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <2 x double> %2, i32 1
+ ; Element 1 can be obtained by splatting it across the vector and extracting
+ ; $w0:sub_64
+ ; MIPS32-DAG: splati.d $w0, [[R1]][1]
+ ; MIPS32-NOT: copy_u.w
+ ; MIPS32-NOT: mtc1
+ ; MIPS32-NOT: mthc1
+ ; MIPS32-NOT: sll
+ ; MIPS32-NOT: sra
+
+ ret double %3
+ ; MIPS32: .size extract_v2f64
+}
+
+define double @extract_v2f64_elt0() nounwind {
+ ; MIPS32: extract_v2f64_elt0:
+
+ %1 = load <2 x double>* @v2f64
+ ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = fadd <2 x double> %1, %1
+ ; MIPS32-DAG: fadd.d $w0, [[R1]], [[R1]]
+
+ %3 = extractelement <2 x double> %2, i32 0
+ ; Element 0 can be obtained by extracting $w0:sub_64 ($f0)
+ ; MIPS32-NOT: copy_u.w
+ ; MIPS32-NOT: mtc1
+ ; MIPS32-NOT: mthc1
+ ; MIPS32-NOT: sll
+ ; MIPS32-NOT: sra
+
+ ret double %3
+ ; MIPS32: .size extract_v2f64_elt0
+}
+
+define void @insert_v4f32(float %a) nounwind {
+ ; MIPS32: insert_v4f32:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = insertelement <4 x float> %1, float %a, i32 1
+ ; float argument passed in $f12
+ ; MIPS32-DAG: insve.w [[R1]][1], $w12[0]
+
+ store <4 x float> %2, <4 x float>* @v4f32
+ ; MIPS32-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32: .size insert_v4f32
+}
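+; insve.w copies element 0 of one vector into an element of another, so the
+; float argument (in $f12, which aliases $w12[0]) is expected to be inserted
+; directly, without a round-trip through a GPR.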
+
+define void @insert_v2f64(double %a) nounwind {
+ ; MIPS32: insert_v2f64:
+
+ %1 = load <2 x double>* @v2f64
+ ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = insertelement <2 x double> %1, double %a, i32 1
+ ; double argument passed in $f12
+ ; MIPS32-DAG: insve.d [[R1]][1], $w12[0]
+
+ store <2 x double> %2, <2 x double>* @v2f64
+ ; MIPS32-DAG: st.d [[R1]]
+
+ ret void
+ ; MIPS32: .size insert_v2f64
+}
diff --git a/test/CodeGen/Mips/msa/bit.ll b/test/CodeGen/Mips/msa/bit.ll
new file mode 100644
index 0000000..59ddbe1
--- /dev/null
+++ b/test/CodeGen/Mips/msa/bit.ll
@@ -0,0 +1,540 @@
+; Test the MSA intrinsics that are encoded with the BIT instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
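+; Each test follows the same pattern: load a vector, apply the intrinsic with
+; an immediate operand (i32 7 throughout), and store the result, checking for
+; the corresponding ld.*/op/st.* sequence.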
+
+@llvm_mips_sat_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sat_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sat_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sat_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sat.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_b_test:
+; CHECK: ld.b
+; CHECK: sat_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sat_s_b_test
+;
+@llvm_mips_sat_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sat_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sat_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sat_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sat.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_h_test:
+; CHECK: ld.h
+; CHECK: sat_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sat_s_h_test
+;
+@llvm_mips_sat_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sat_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sat_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sat_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sat.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_w_test:
+; CHECK: ld.w
+; CHECK: sat_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sat_s_w_test
+;
+@llvm_mips_sat_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sat_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sat_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sat_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sat.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_d_test:
+; CHECK: ld.d
+; CHECK: sat_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sat_s_d_test
+;
+@llvm_mips_sat_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sat_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sat_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sat_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sat.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_b_test:
+; CHECK: ld.b
+; CHECK: sat_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sat_u_b_test
+;
+@llvm_mips_sat_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sat_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sat_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sat_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sat.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_h_test:
+; CHECK: ld.h
+; CHECK: sat_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sat_u_h_test
+;
+@llvm_mips_sat_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sat_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sat_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sat_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sat.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_w_test:
+; CHECK: ld.w
+; CHECK: sat_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sat_u_w_test
+;
+@llvm_mips_sat_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sat_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sat_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sat_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sat.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_d_test:
+; CHECK: ld.d
+; CHECK: sat_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sat_u_d_test
+;
+@llvm_mips_slli_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_slli_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_slli_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_slli_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_slli_b_test:
+; CHECK: ld.b
+; CHECK: slli.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_slli_b_test
+;
+@llvm_mips_slli_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_slli_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_slli_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_slli_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_slli_h_test:
+; CHECK: ld.h
+; CHECK: slli.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_slli_h_test
+;
+@llvm_mips_slli_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_slli_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_slli_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_slli_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_slli_w_test:
+; CHECK: ld.w
+; CHECK: slli.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_slli_w_test
+;
+@llvm_mips_slli_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_slli_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_slli_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_slli_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_slli_d_test:
+; CHECK: ld.d
+; CHECK: slli.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_slli_d_test
+;
+@llvm_mips_srai_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srai_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srai_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srai_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srai_b_test:
+; CHECK: ld.b
+; CHECK: srai.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srai_b_test
+;
+@llvm_mips_srai_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srai_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srai_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srai_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srai_h_test:
+; CHECK: ld.h
+; CHECK: srai.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srai_h_test
+;
+@llvm_mips_srai_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srai_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srai_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srai_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srai_w_test:
+; CHECK: ld.w
+; CHECK: srai.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srai_w_test
+;
+@llvm_mips_srai_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srai_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srai_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srai_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srai_d_test:
+; CHECK: ld.d
+; CHECK: srai.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srai_d_test
+;
+@llvm_mips_srari_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srari_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srari_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srari_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.srari.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_srari_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srari_b_test:
+; CHECK: ld.b
+; CHECK: srari.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srari_b_test
+;
+@llvm_mips_srari_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srari_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srari_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srari_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.srari.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_srari_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srari_h_test:
+; CHECK: ld.h
+; CHECK: srari.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srari_h_test
+;
+@llvm_mips_srari_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srari_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srari_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srari_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.srari.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_srari_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srari_w_test:
+; CHECK: ld.w
+; CHECK: srari.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srari_w_test
+;
+@llvm_mips_srari_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srari_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srari_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srari_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.srari.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_srari_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srari_d_test:
+; CHECK: ld.d
+; CHECK: srari.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srari_d_test
+;
+@llvm_mips_srli_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srli_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srli_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srli_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srli_b_test:
+; CHECK: ld.b
+; CHECK: srli.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srli_b_test
+;
+@llvm_mips_srli_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srli_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srli_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srli_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srli_h_test:
+; CHECK: ld.h
+; CHECK: srli.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srli_h_test
+;
+@llvm_mips_srli_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srli_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srli_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srli_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srli_w_test:
+; CHECK: ld.w
+; CHECK: srli.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srli_w_test
+;
+@llvm_mips_srli_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srli_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srli_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srli_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srli_d_test:
+; CHECK: ld.d
+; CHECK: srli.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srli_d_test
+;
+@llvm_mips_srlri_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srlri_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srlri_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srlri_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_srlri_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srlri_b_test:
+; CHECK: ld.b
+; CHECK: srlri.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srlri_b_test
+;
+@llvm_mips_srlri_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srlri_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srlri_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srlri_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_srlri_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srlri_h_test:
+; CHECK: ld.h
+; CHECK: srlri.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srlri_h_test
+;
+@llvm_mips_srlri_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srlri_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srlri_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srlri_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_srlri_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srlri_w_test:
+; CHECK: ld.w
+; CHECK: srlri.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srlri_w_test
+;
+@llvm_mips_srlri_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srlri_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srlri_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srlri_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_srlri_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srlri.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srlri_d_test:
+; CHECK: ld.d
+; CHECK: srlri.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srlri_d_test
+;
diff --git a/test/CodeGen/Mips/msa/bitcast.ll b/test/CodeGen/Mips/msa/bitcast.ll
new file mode 100644
index 0000000..8e880ec
--- /dev/null
+++ b/test/CodeGen/Mips/msa/bitcast.ll
@@ -0,0 +1,1220 @@
+; Test the bitcast operation for big-endian and little-endian.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=BIGENDIAN %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=LITENDIAN %s
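+; Bitcasts are expected to be free on little-endian. On big-endian the
+; in-register layout of an MSA value depends on its element size, so casts
+; between element sizes should require shf corrections, as checked below.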
+
+define void @v16i8_to_v16i8(<16 x i8>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v16i8:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v16i8_to_v16i8
+
+; BIGENDIAN: v16i8_to_v16i8:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.b [[R3]],
+; BIGENDIAN: .size v16i8_to_v16i8
+
+define void @v16i8_to_v8i16(<16 x i8>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v8i16:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v16i8_to_v8i16
+
+; BIGENDIAN: v16i8_to_v8i16:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v16i8_to_v8i16
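+; The shf immediate encodes four 2-bit lane selectors (LSB first): 177 is
+; 0b10110001, which swaps adjacent elements, and 27 is 0b00011011, which
+; reverses each group of four elements. This reading is an interpretation of
+; the checked immediates, not something the tests verify directly.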
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v16i8_to_v8f16(<16 x i8>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v8f16:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.b [[R2]],
+; LITENDIAN: .size v16i8_to_v8f16
+
+; BIGENDIAN: v16i8_to_v8f16:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.b [[R2]],
+; BIGENDIAN: .size v16i8_to_v8f16
+
+define void @v16i8_to_v4i32(<16 x i8>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v4i32:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v16i8_to_v4i32
+
+; BIGENDIAN: v16i8_to_v4i32:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v16i8_to_v4i32
+
+define void @v16i8_to_v4f32(<16 x i8>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v4f32:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v16i8_to_v4f32
+
+; BIGENDIAN: v16i8_to_v4f32:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: fadd.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v16i8_to_v4f32
+
+define void @v16i8_to_v2i64(<16 x i8>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v2i64:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v16i8_to_v2i64
+
+; BIGENDIAN: v16i8_to_v2i64:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R3]], 177
+; BIGENDIAN: addv.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v16i8_to_v2i64
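+; There is no shf.d, so the 64-bit correction is expected to be composed of
+; shf.b 27 (reverse the bytes within each word) and shf.w 177 (swap the two
+; words of each doubleword).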
+
+define void @v16i8_to_v2f64(<16 x i8>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v2f64:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v16i8_to_v2f64
+
+; BIGENDIAN: v16i8_to_v2f64:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R3]], 177
+; BIGENDIAN: fadd.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v16i8_to_v2f64
+
+define void @v8i16_to_v16i8(<8 x i16>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v16i8:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v8i16_to_v16i8
+
+; BIGENDIAN: v8i16_to_v16i8:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v8i16_to_v16i8
+
+define void @v8i16_to_v8i16(<8 x i16>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v8i16:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v8i16_to_v8i16
+
+; BIGENDIAN: v8i16_to_v8i16:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.h [[R3]],
+; BIGENDIAN: .size v8i16_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8i16_to_v8f16(<8 x i16>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v8f16:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.h [[R2]],
+; LITENDIAN: .size v8i16_to_v8f16
+
+; BIGENDIAN: v8i16_to_v8f16:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.h [[R2]],
+; BIGENDIAN: .size v8i16_to_v8f16
+
+define void @v8i16_to_v4i32(<8 x i16>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v4i32:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v8i16_to_v4i32
+
+; BIGENDIAN: v8i16_to_v4i32:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v8i16_to_v4i32
+
+define void @v8i16_to_v4f32(<8 x i16>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v4f32:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v8i16_to_v4f32
+
+; BIGENDIAN: v8i16_to_v4f32:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v8i16_to_v4f32
+
+define void @v8i16_to_v2i64(<8 x i16>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v2i64:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v8i16_to_v2i64
+
+; BIGENDIAN: v8i16_to_v2i64:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v8i16_to_v2i64
+
+define void @v8i16_to_v2f64(<8 x i16>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v2f64:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v8i16_to_v2f64
+
+; BIGENDIAN: v8i16_to_v2f64:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: fadd.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v8i16_to_v2f64
+
+;----
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v16i8(<8 x half>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <16 x i8>
+ %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %1, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v16i8:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v8f16_to_v16i8
+
+; BIGENDIAN: v8f16_to_v16i8:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R1]], 177
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v8f16_to_v16i8
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v8i16(<8 x half>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <8 x i16>
+ %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %1, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v8i16:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.h [[R2]],
+; LITENDIAN: .size v8f16_to_v8i16
+
+; BIGENDIAN: v8f16_to_v8i16:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.h [[R2]],
+; BIGENDIAN: .size v8f16_to_v8i16
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v8f16(<8 x half>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <8 x half>
+ store <8 x half> %1, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v8f16:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: st.h [[R1]],
+; LITENDIAN: .size v8f16_to_v8f16
+
+; BIGENDIAN: v8f16_to_v8f16:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: st.h [[R1]],
+; BIGENDIAN: .size v8f16_to_v8f16
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v4i32(<8 x half>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %1, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v4i32:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.w [[R2]],
+; LITENDIAN: .size v8f16_to_v4i32
+
+; BIGENDIAN: v8f16_to_v4i32:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.h [[R2:\$w[0-9]+]], [[R1]], 177
+; BIGENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v8f16_to_v4i32
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v4f32(<8 x half>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <4 x float>
+ %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %1, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v4f32:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.w [[R2]],
+; LITENDIAN: .size v8f16_to_v4f32
+
+; BIGENDIAN: v8f16_to_v4f32:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.h [[R2:\$w[0-9]+]], [[R1]], 177
+; BIGENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v8f16_to_v4f32
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v2i64(<8 x half>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <2 x i64>
+ %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %1, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v2i64:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.d [[R2]],
+; LITENDIAN: .size v8f16_to_v2i64
+
+; BIGENDIAN: v8f16_to_v2i64:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.h [[R2:\$w[0-9]+]], [[R1]], 27
+; BIGENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v8f16_to_v2i64
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v2f64(<8 x half>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <2 x double>
+ %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %1, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v2f64:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.d [[R2]],
+; LITENDIAN: .size v8f16_to_v2f64
+
+; BIGENDIAN: v8f16_to_v2f64:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.h [[R2:\$w[0-9]+]], [[R1]], 27
+; BIGENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v8f16_to_v2f64
+;----
+
+define void @v4i32_to_v16i8(<4 x i32>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v16i8:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v4i32_to_v16i8
+
+; BIGENDIAN: v4i32_to_v16i8:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v4i32_to_v16i8
+
+define void @v4i32_to_v8i16(<4 x i32>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v8i16:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v4i32_to_v8i16
+
+; BIGENDIAN: v4i32_to_v8i16:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v4i32_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v4i32_to_v8f16(<4 x i32>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v8f16:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.w [[R2]],
+; LITENDIAN: .size v4i32_to_v8f16
+
+; BIGENDIAN: v4i32_to_v8f16:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.w [[R2]],
+; BIGENDIAN: .size v4i32_to_v8f16
+
+define void @v4i32_to_v4i32(<4 x i32>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v4i32:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v4i32_to_v4i32
+
+; BIGENDIAN: v4i32_to_v4i32:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v4i32_to_v4i32
+
+define void @v4i32_to_v4f32(<4 x i32>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v4f32:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v4i32_to_v4f32
+
+; BIGENDIAN: v4i32_to_v4f32:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v4i32_to_v4f32
+
+define void @v4i32_to_v2i64(<4 x i32>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v2i64:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v4i32_to_v2i64
+
+; BIGENDIAN: v4i32_to_v2i64:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v4i32_to_v2i64
+
+define void @v4i32_to_v2f64(<4 x i32>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v2f64:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v4i32_to_v2f64
+
+; BIGENDIAN: v4i32_to_v2f64:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v4i32_to_v2f64
+
+define void @v4f32_to_v16i8(<4 x float>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v16i8:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v4f32_to_v16i8
+
+; BIGENDIAN: v4f32_to_v16i8:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v4f32_to_v16i8
+
+define void @v4f32_to_v8i16(<4 x float>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v8i16:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v4f32_to_v8i16
+
+; BIGENDIAN: v4f32_to_v8i16:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v4f32_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v4f32_to_v8f16(<4 x float>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v8f16:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.w [[R2]],
+; LITENDIAN: .size v4f32_to_v8f16
+
+; BIGENDIAN: v4f32_to_v8f16:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.w [[R2]],
+; BIGENDIAN: .size v4f32_to_v8f16
+
+define void @v4f32_to_v4i32(<4 x float>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v4i32:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v4f32_to_v4i32
+
+; BIGENDIAN: v4f32_to_v4i32:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v4f32_to_v4i32
+
+define void @v4f32_to_v4f32(<4 x float>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v4f32:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v4f32_to_v4f32
+
+; BIGENDIAN: v4f32_to_v4f32:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v4f32_to_v4f32
+
+define void @v4f32_to_v2i64(<4 x float>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v2i64:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v4f32_to_v2i64
+
+; BIGENDIAN: v4f32_to_v2i64:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v4f32_to_v2i64
+
+define void @v4f32_to_v2f64(<4 x float>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v2f64:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v4f32_to_v2f64
+
+; BIGENDIAN: v4f32_to_v2f64:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v4f32_to_v2f64
+
+define void @v2i64_to_v16i8(<2 x i64>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v16i8:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v2i64_to_v16i8
+
+; BIGENDIAN: v2i64_to_v16i8:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R3]], 177
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v2i64_to_v16i8
+
+define void @v2i64_to_v8i16(<2 x i64>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v8i16:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v2i64_to_v8i16
+
+; BIGENDIAN: v2i64_to_v8i16:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v2i64_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v2i64_to_v8f16(<2 x i64>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v8f16:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.d [[R2]],
+; LITENDIAN: .size v2i64_to_v8f16
+
+; BIGENDIAN: v2i64_to_v8f16:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.d [[R2]],
+; BIGENDIAN: .size v2i64_to_v8f16
+
+define void @v2i64_to_v4i32(<2 x i64>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v4i32:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v2i64_to_v4i32
+
+; BIGENDIAN: v2i64_to_v4i32:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v2i64_to_v4i32
+
+define void @v2i64_to_v4f32(<2 x i64>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v4f32:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v2i64_to_v4f32
+
+; BIGENDIAN: v2i64_to_v4f32:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v2i64_to_v4f32
+
+define void @v2i64_to_v2i64(<2 x i64>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v2i64:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v2i64_to_v2i64
+
+; BIGENDIAN: v2i64_to_v2i64:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v2i64_to_v2i64
+
+define void @v2i64_to_v2f64(<2 x i64>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v2f64:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v2i64_to_v2f64
+
+; BIGENDIAN: v2i64_to_v2f64:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v2i64_to_v2f64
+
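+; (No shf reordering is expected in the two preceding cases: a bitcast that
+; keeps the 64-bit element width leaves the lanes in place on either
+; endianness.)
+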
+define void @v2f64_to_v16i8(<2 x double>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v16i8:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v2f64_to_v16i8
+
+; BIGENDIAN: v2f64_to_v16i8:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R3]], 177
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v2f64_to_v16i8
+
+define void @v2f64_to_v8i16(<2 x double>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v8i16:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v2f64_to_v8i16
+
+; BIGENDIAN: v2f64_to_v8i16:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v2f64_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v2f64_to_v8f16(<2 x double>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v8f16:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.d [[R2]],
+; LITENDIAN: .size v2f64_to_v8f16
+
+; BIGENDIAN: v2f64_to_v8f16:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.d [[R2]],
+; BIGENDIAN: .size v2f64_to_v8f16
+
+define void @v2f64_to_v4i32(<2 x double>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v4i32:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v2f64_to_v4i32
+
+; BIGENDIAN: v2f64_to_v4i32:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v2f64_to_v4i32
+
+define void @v2f64_to_v4f32(<2 x double>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v4f32:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v2f64_to_v4f32
+
+; BIGENDIAN: v2f64_to_v4f32:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v2f64_to_v4f32
+
+define void @v2f64_to_v2i64(<2 x double>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v2i64:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v2f64_to_v2i64
+
+; BIGENDIAN: v2f64_to_v2i64:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v2f64_to_v2i64
+
+define void @v2f64_to_v2f64(<2 x double>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v2f64:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v2f64_to_v2f64
+
+; BIGENDIAN: v2f64_to_v2f64:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v2f64_to_v2f64
+
+declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind
+declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind
+declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind
+declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind
+declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>) nounwind
+declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>) nounwind
diff --git a/test/CodeGen/Mips/msa/bitwise.ll b/test/CodeGen/Mips/msa/bitwise.ll
new file mode 100644
index 0000000..9a88c47
--- /dev/null
+++ b/test/CodeGen/Mips/msa/bitwise.ll
@@ -0,0 +1,1639 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
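+; These cases check that the plain IR operators (and/or/xor, the shifts, the
+; bit-count intrinsics, and the recognisable bit-field patterns) each select
+; the corresponding MSA three-register or immediate-form instruction.
+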
+define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: and_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <16 x i8> %1, %2
+ ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v16i8
+}
+
+define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: and_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <8 x i16> %1, %2
+ ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v8i16
+}
+
+define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: and_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <4 x i32> %1, %2
+ ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v4i32
+}
+
+define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: and_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <2 x i64> %1, %2
+ ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v2i64
+}
+
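+; MSA only has immediate forms of the logical operations for byte elements
+; (andi.b, ori.b, nori.b, xori.b), so for the .h/.w/.d cases below the
+; constant is expected to be splatted with ldi and the three-register form
+; used instead.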
+define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: and_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: andi.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v16i8_i
+}
+
+define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: and_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: and.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v8i16_i
+}
+
+define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: and_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = and <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: and.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v4i32_i
+}
+
+define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: and_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = and <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: and.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v2i64_i
+}
+
+define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: or_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <16 x i8> %1, %2
+ ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v16i8
+}
+
+define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: or_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <8 x i16> %1, %2
+ ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v8i16
+}
+
+define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: or_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <4 x i32> %1, %2
+ ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v4i32
+}
+
+define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: or_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <2 x i64> %1, %2
+ ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v2i64
+}
+
+define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: or_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ; CHECK-DAG: ori.b [[R4:\$w[0-9]+]], [[R1]], 3
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v16i8_i
+}
+
+define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: or_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: or.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v8i16_i
+}
+
+define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: or_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: or.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v4i32_i
+}
+
+define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: or_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <2 x i64> %1, <i64 3, i64 3>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: or.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v2i64_i
+}
+
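+; LLVM IR has no nor operator, so nor.v must be matched from the combined
+; (xor (or $a, $b), -1) pattern.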
+define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: nor_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <16 x i8> %1, %2
+ %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ; CHECK-DAG: nor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v16i8
+}
+
+define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: nor_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <8 x i16> %1, %2
+ %4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ; CHECK-DAG: nor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v8i16
+}
+
+define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: nor_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <4 x i32> %1, %2
+ %4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ; CHECK-DAG: nor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v4i32
+}
+
+define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: nor_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <2 x i64> %1, %2
+ %4 = xor <2 x i64> %3, <i64 -1, i64 -1>
+ ; CHECK-DAG: nor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v2i64
+}
+
+define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: nor_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ; CHECK-DAG: nori.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v16i8_i
+}
+
+define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: nor_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = xor <8 x i16> %2, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: nor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v8i16_i
+}
+
+define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: nor_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = xor <4 x i32> %2, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: nor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v4i32_i
+}
+
+define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: nor_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <2 x i64> %1, <i64 1, i64 1>
+ %3 = xor <2 x i64> %2, <i64 -1, i64 -1>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: nor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v2i64_i
+}
+
+define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: xor_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = xor <16 x i8> %1, %2
+ ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v16i8
+}
+
+define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: xor_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = xor <8 x i16> %1, %2
+ ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v8i16
+}
+
+define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: xor_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = xor <4 x i32> %1, %2
+ ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v4i32
+}
+
+define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: xor_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = xor <2 x i64> %1, %2
+ ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v2i64
+}
+
+define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: xor_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ; CHECK-DAG: xori.b [[R4:\$w[0-9]+]], [[R1]], 3
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v16i8_i
+}
+
+define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: xor_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: xor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v8i16_i
+}
+
+define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: xor_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: xor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v4i32_i
+}
+
+define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: xor_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <2 x i64> %1, <i64 3, i64 3>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: xor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v2i64_i
+}
+
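+; The shifts below take a whole vector of per-element shift amounts; the
+; splat-constant cases further down should select the immediate slli/srai/
+; srli forms instead.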
+define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: sll_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <16 x i8> %1, %2
+ ; CHECK-DAG: sll.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v16i8
+}
+
+define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: sll_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <8 x i16> %1, %2
+ ; CHECK-DAG: sll.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v8i16
+}
+
+define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: sll_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <4 x i32> %1, %2
+ ; CHECK-DAG: sll.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v4i32
+}
+
+define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: sll_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <2 x i64> %1, %2
+ ; CHECK-DAG: sll.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v2i64
+}
+
+define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: sll_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shl <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: slli.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v16i8_i
+}
+
+define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: sll_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shl <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: slli.h [[R4:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v8i16_i
+}
+
+define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: sll_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shl <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: slli.w [[R4:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v4i32_i
+}
+
+define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: sll_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shl <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: slli.d [[R4:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v2i64_i
+}
+
+define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: sra_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = ashr <16 x i8> %1, %2
+ ; CHECK-DAG: sra.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v16i8
+}
+
+define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: sra_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = ashr <8 x i16> %1, %2
+ ; CHECK-DAG: sra.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v8i16
+}
+
+define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: sra_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = ashr <4 x i32> %1, %2
+ ; CHECK-DAG: sra.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v4i32
+}
+
+define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: sra_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = ashr <2 x i64> %1, %2
+ ; CHECK-DAG: sra.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v2i64
+}
+
+define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: sra_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = ashr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: srai.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v16i8_i
+}
+
+define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: sra_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = ashr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: srai.h [[R4:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v8i16_i
+}
+
+define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: sra_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = ashr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: srai.w [[R4:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v4i32_i
+}
+
+define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: sra_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = ashr <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v2i64_i
+}
+
+define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: srl_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = lshr <16 x i8> %1, %2
+ ; CHECK-DAG: srl.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v16i8
+}
+
+define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: srl_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = lshr <8 x i16> %1, %2
+ ; CHECK-DAG: srl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v8i16
+}
+
+define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: srl_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = lshr <4 x i32> %1, %2
+ ; CHECK-DAG: srl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v4i32
+}
+
+define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: srl_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = lshr <2 x i64> %1, %2
+ ; CHECK-DAG: srl.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v2i64
+}
+
+define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: srl_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = lshr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: srli.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v16i8_i
+}
+
+define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: srl_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = lshr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: srli.h [[R4:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v8i16_i
+}
+
+define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: srl_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = lshr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: srli.w [[R4:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v4i32_i
+}
+
+define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: srl_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = lshr <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: srli.d [[R4:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v2i64_i
+}
+
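+; The ctpop and ctlz intrinsics map directly onto pcnt and nlzc.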
+define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: ctpop_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %1)
+ ; CHECK-DAG: pcnt.b [[R3:\$w[0-9]+]], [[R1]]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctpop_v16i8
+}
+
+define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: ctpop_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %1)
+ ; CHECK-DAG: pcnt.h [[R3:\$w[0-9]+]], [[R1]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctpop_v8i16
+}
+
+define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ctpop_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %1)
+ ; CHECK-DAG: pcnt.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctpop_v4i32
+}
+
+define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ctpop_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %1)
+ ; CHECK-DAG: pcnt.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctpop_v2i64
+}
+
+define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: ctlz_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %1)
+ ; CHECK-DAG: nlzc.b [[R3:\$w[0-9]+]], [[R1]]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctlz_v16i8
+}
+
+define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: ctlz_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %1)
+ ; CHECK-DAG: nlzc.h [[R3:\$w[0-9]+]], [[R1]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctlz_v8i16
+}
+
+define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ctlz_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1)
+ ; CHECK-DAG: nlzc.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctlz_v4i32
+}
+
+define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ctlz_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %1)
+ ; CHECK-DAG: nlzc.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctlz_v2i64
+}
+
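+; A bit-select computes (a & m) | (b & ~m). bsel.v, bmnz.v and bmz.v all
+; perform this operation and differ only in which operand doubles as the
+; destination, so the mnemonic chosen depends on the register assignment.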
+define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %m) nounwind {
+ ; CHECK: bsel_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %m
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %5 = and <16 x i8> %1, %3
+ %6 = and <16 x i8> %2, %4
+ %7 = or <16 x i8> %5, %6
+ ; bmnz.v is the same operation as bsel.v, just with the operands arranged
+ ; differently
+ ; CHECK-DAG: bmnz.v [[R1]], [[R2]], [[R3]]
+ store <16 x i8> %7, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v16i8
+}
+
+define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind {
+ ; CHECK: bsel_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %m
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($6)
+ %3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %4 = and <16 x i8> %1, %3
+ %5 = and <16 x i8> <i8 6, i8 6, i8 6, i8 6,
+ i8 6, i8 6, i8 6, i8 6,
+ i8 6, i8 6, i8 6, i8 6,
+ i8 6, i8 6, i8 6, i8 6>, %2
+ %6 = or <16 x i8> %4, %5
+ ; CHECK-DAG: bseli.b [[R3]], [[R1]], 6
+ store <16 x i8> %6, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v16i8_i
+}
+
+define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: bsel_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <8 x i16> %1, <i16 6, i16 6, i16 6, i16 6,
+ i16 6, i16 6, i16 6, i16 6>
+ %4 = and <8 x i16> %2, <i16 65529, i16 65529, i16 65529, i16 65529,
+ i16 65529, i16 65529, i16 65529, i16 65529>
+ %5 = or <8 x i16> %3, %4
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 6
+ ; CHECK-DAG: bsel.v [[R3]], [[R2]], [[R1]]
+ store <8 x i16> %5, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v8i16
+}
+
+define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: bsel_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <4 x i32> %1, <i32 6, i32 6, i32 6, i32 6>
+ %4 = and <4 x i32> %2, <i32 4294967289, i32 4294967289, i32 4294967289, i32 4294967289>
+ %5 = or <4 x i32> %3, %4
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 6
+ ; CHECK-DAG: bsel.v [[R3]], [[R2]], [[R1]]
+ store <4 x i32> %5, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v4i32
+}
+
+define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: bsel_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <2 x i64> %1, <i64 6, i64 6>
+ %4 = and <2 x i64> %2, <i64 18446744073709551609, i64 18446744073709551609>
+ %5 = or <2 x i64> %3, %4
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 6
+ ; CHECK-DAG: bsel.v [[R3]], [[R2]], [[R1]]
+ store <2 x i64> %5, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v2i64
+}
+
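+; binsl/binsr insert the most/least significant bits of one vector into
+; another, so they can be matched from an or of two and operations whose
+; masks are complementary runs of leading or trailing ones.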
+define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: binsl_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <16 x i8> %1, <i8 192, i8 192, i8 192, i8 192,
+ i8 192, i8 192, i8 192, i8 192,
+ i8 192, i8 192, i8 192, i8 192,
+ i8 192, i8 192, i8 192, i8 192>
+ %4 = and <16 x i8> %2, <i8 63, i8 63, i8 63, i8 63,
+ i8 63, i8 63, i8 63, i8 63,
+ i8 63, i8 63, i8 63, i8 63,
+ i8 63, i8 63, i8 63, i8 63>
+ %5 = or <16 x i8> %3, %4
+ ; CHECK-DAG: binsli.b [[R2]], [[R1]], 2
+ store <16 x i8> %5, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsl_v16i8_i
+}
+
+define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: binsl_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <8 x i16> %1, <i16 49152, i16 49152, i16 49152, i16 49152,
+ i16 49152, i16 49152, i16 49152, i16 49152>
+ %4 = and <8 x i16> %2, <i16 16383, i16 16383, i16 16383, i16 16383,
+ i16 16383, i16 16383, i16 16383, i16 16383>
+ %5 = or <8 x i16> %3, %4
+ ; CHECK-DAG: binsli.h [[R2]], [[R1]], 2
+ store <8 x i16> %5, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsl_v8i16_i
+}
+
+define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: binsl_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <4 x i32> %1, <i32 3221225472, i32 3221225472, i32 3221225472, i32 3221225472>
+ %4 = and <4 x i32> %2, <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
+ %5 = or <4 x i32> %3, %4
+ ; CHECK-DAG: binsli.w [[R2]], [[R1]], 2
+ store <4 x i32> %5, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsl_v4i32_i
+}
+
+define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: binsl_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <2 x i64> %1, <i64 18446744073709551608, i64 18446744073709551608>
+ %4 = and <2 x i64> %2, <i64 7, i64 7>
+ %5 = or <2 x i64> %3, %4
+ ; TODO: We use a particularly wide mask here to work around a legalization
+ ; issue. If the mask doesn't fit within a 10-bit immediate, it gets
+ ; legalized into a constant pool. We should add a test to cover the
+ ; other cases once they correctly select binsli.d.
+ ; CHECK-DAG: binsli.d [[R2]], [[R1]], 61
+ store <2 x i64> %5, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsl_v2i64_i
+}
+
+define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: binsr_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3,
+ i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %4 = and <16 x i8> %2, <i8 252, i8 252, i8 252, i8 252,
+ i8 252, i8 252, i8 252, i8 252,
+ i8 252, i8 252, i8 252, i8 252,
+ i8 252, i8 252, i8 252, i8 252>
+ %5 = or <16 x i8> %3, %4
+ ; CHECK-DAG: binsri.b [[R2]], [[R1]], 2
+ store <16 x i8> %5, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsr_v16i8_i
+}
+
+define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: binsr_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3,
+ i16 3, i16 3, i16 3, i16 3>
+ %4 = and <8 x i16> %2, <i16 65532, i16 65532, i16 65532, i16 65532,
+ i16 65532, i16 65532, i16 65532, i16 65532>
+ %5 = or <8 x i16> %3, %4
+ ; CHECK-DAG: binsri.h [[R2]], [[R1]], 2
+ store <8 x i16> %5, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsr_v8i16_i
+}
+
+define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: binsr_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
+ %4 = and <4 x i32> %2, <i32 4294967292, i32 4294967292, i32 4294967292, i32 4294967292>
+ %5 = or <4 x i32> %3, %4
+ ; CHECK-DAG: binsri.w [[R2]], [[R1]], 2
+ store <4 x i32> %5, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsr_v4i32_i
+}
+
+define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: binsr_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <2 x i64> %1, <i64 3, i64 3>
+ %4 = and <2 x i64> %2, <i64 18446744073709551612, i64 18446744073709551612>
+ %5 = or <2 x i64> %3, %4
+ ; CHECK-DAG: binsri.d [[R2]], [[R1]], 2
+ store <2 x i64> %5, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsr_v2i64_i
+}
+
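+; bclr/bset/bneg clear, set and flip one bit per element: bset is matched
+; from an or with (1 << n), bneg from an xor with it, and bclr from an and
+; with its complement.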
+define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: bclr_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
+ %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %5 = and <16 x i8> %1, %4
+ ; CHECK-DAG: bclr.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %5, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclr_v16i8
+}
+
+define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: bclr_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
+ %4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %5 = and <8 x i16> %1, %4
+ ; CHECK-DAG: bclr.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %5, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclr_v8i16
+}
+
+define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: bclr_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
+ %4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %5 = and <4 x i32> %1, %4
+ ; CHECK-DAG: bclr.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %5, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclr_v4i32
+}
+
+define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: bclr_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <2 x i64> <i64 1, i64 1>, %2
+ %4 = xor <2 x i64> %3, <i64 -1, i64 -1>
+ %5 = and <2 x i64> %1, %4
+ ; CHECK-DAG: bclr.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %5, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclr_v2i64
+}
+
+define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: bset_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
+ %4 = or <16 x i8> %1, %3
+ ; CHECK-DAG: bset.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bset_v16i8
+}
+
+define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: bset_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
+ %4 = or <8 x i16> %1, %3
+ ; CHECK-DAG: bset.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bset_v8i16
+}
+
+define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: bset_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
+ %4 = or <4 x i32> %1, %3
+ ; CHECK-DAG: bset.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bset_v4i32
+}
+
+define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: bset_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <2 x i64> <i64 1, i64 1>, %2
+ %4 = or <2 x i64> %1, %3
+ ; CHECK-DAG: bset.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bset_v2i64
+}
+
+define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: bneg_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
+ %4 = xor <16 x i8> %1, %3
+ ; CHECK-DAG: bneg.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bneg_v16i8
+}
+
+define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: bneg_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
+ %4 = xor <8 x i16> %1, %3
+ ; CHECK-DAG: bneg.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bneg_v8i16
+}
+
+define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: bneg_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
+ %4 = xor <4 x i32> %1, %3
+ ; CHECK-DAG: bneg.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bneg_v4i32
+}
+
+define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: bneg_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <2 x i64> <i64 1, i64 1>, %2
+ %4 = xor <2 x i64> %1, %3
+ ; CHECK-DAG: bneg.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bneg_v2i64
+}
+
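+; The immediate variants below use constant power-of-two masks in place of
+; the shifted-one patterns above.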
+define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: bclri_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>,
+ <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %3 = and <16 x i8> %1, %2
+ ; bclri.b and andi.b are exactly equivalent.
+ ; CHECK-DAG: andi.b [[R3:\$w[0-9]+]], [[R1]], 247
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclri_v16i8
+}
+
+define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: bclri_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <8 x i16> <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>,
+ <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %3 = and <8 x i16> %1, %2
+ ; CHECK-DAG: bclri.h [[R3:\$w[0-9]+]], [[R1]], 3
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclri_v8i16
+}
+
+define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: bclri_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <4 x i32> <i32 8, i32 8, i32 8, i32 8>,
+ <i32 -1, i32 -1, i32 -1, i32 -1>
+ %3 = and <4 x i32> %1, %2
+ ; CHECK-DAG: bclri.w [[R3:\$w[0-9]+]], [[R1]], 3
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclri_v4i32
+}
+
+define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: bclri_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <2 x i64> <i64 8, i64 8>,
+ <i64 -1, i64 -1>
+ %3 = and <2 x i64> %1, %2
+ ; CHECK-DAG: bclri.d [[R3:\$w[0-9]+]], [[R1]], 3
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclri_v2i64
+}
+
+define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: bseti_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+ ; CHECK-DAG: bseti.b [[R3:\$w[0-9]+]], [[R1]], 3
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bseti_v16i8
+}
+
+define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: bseti_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ; CHECK-DAG: bseti.h [[R3:\$w[0-9]+]], [[R1]], 3
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bseti_v8i16
+}
+
+define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: bseti_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
+ ; CHECK-DAG: bseti.w [[R3:\$w[0-9]+]], [[R1]], 3
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bseti_v4i32
+}
+
+define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: bseti_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <2 x i64> %1, <i64 8, i64 8>
+ ; CHECK-DAG: bseti.d [[R3:\$w[0-9]+]], [[R1]], 3
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bseti_v2i64
+}
+
+define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: bnegi_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+ ; CHECK-DAG: bnegi.b [[R3:\$w[0-9]+]], [[R1]], 3
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bnegi_v16i8
+}
+
+define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: bnegi_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ; CHECK-DAG: bnegi.h [[R3:\$w[0-9]+]], [[R1]], 3
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bnegi_v8i16
+}
+
+define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: bnegi_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
+ ; CHECK-DAG: bnegi.w [[R3:\$w[0-9]+]], [[R1]], 3
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bnegi_v4i32
+}
+
+define void @bnegi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: bnegi_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <2 x i64> %1, <i64 8, i64 8>
+ ; CHECK-DAG: bnegi.d [[R3:\$w[0-9]+]], [[R1]], 3
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bnegi_v2i64
+}
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %val)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %val)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %val)
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val)
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %val)
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %val)
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val)
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %val)
diff --git a/test/CodeGen/Mips/msa/compare.ll b/test/CodeGen/Mips/msa/compare.ll
new file mode 100644
index 0000000..6408d7b
--- /dev/null
+++ b/test/CodeGen/Mips/msa/compare.ll
@@ -0,0 +1,2079 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
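+
+; The icmp results below are sign-extended from <N x i1> so that each lane is
+; all-ones or all-zeroes, matching the per-element result produced by the MSA
+; compare instructions.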
+
+define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ceq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp eq <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: ceq.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceq_v16i8
+}
+
+define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ceq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp eq <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: ceq.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceq_v8i16
+}
+
+define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ceq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp eq <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: ceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceq_v4i32
+}
+
+define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ceq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp eq <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: ceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceq_v2i64
+}
+
+define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: cle_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: cle_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_s_v16i8
+}
+
+define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: cle_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: cle_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_s_v8i16
+}
+
+define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: cle_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: cle_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_s_v4i32
+}
+
+define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: cle_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: cle_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_s_v2i64
+}
+
+define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: cle_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: cle_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_u_v16i8
+}
+
+define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: cle_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: cle_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_u_v8i16
+}
+
+define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: cle_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: cle_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_u_v4i32
+}
+
+define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: cle_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: cle_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_u_v2i64
+}
+
+define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: clt_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: clt_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_s_v16i8
+}
+
+define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: clt_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: clt_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_s_v8i16
+}
+
+define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: clt_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: clt_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_s_v4i32
+}
+
+define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: clt_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: clt_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_s_v2i64
+}
+
+define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: clt_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: clt_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_u_v16i8
+}
+
+define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: clt_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: clt_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_u_v8i16
+}
+
+define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: clt_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: clt_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_u_v4i32
+}
+
+define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: clt_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: clt_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_u_v2i64
+}
+
+; MSA has no vector != comparison, but test the setne pattern anyway since
+; we've had legalizer issues in this area.
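+; It is expected to legalize to a ceq followed by an inversion of the result.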
+define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: cne_v16i8:
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ne <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: ceq.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ ; CHECK-DAG: xori.b [[R3]], [[R3]], 255
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cne_v16i8
+}
+
+; MSA has no vector != comparison, but test the setne pattern anyway since
+; we've had legalizer issues in this area.
+define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: cne_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ne <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: ceq.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
+ ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
+ ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cne_v8i16
+}
+
+; MSA has no vector != comparison, but test the setne pattern anyway since
+; we've had legalizer issues in this area.
+define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: cne_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ne <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: ceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
+ ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
+ ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cne_v4i32
+}
+
+; MSA has no vector != comparison, but test the setne pattern anyway since
+; we've had legalizer issues in this area.
+define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: cne_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ne <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: ceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
+ ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
+ ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cne_v2i64
+}
+
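+; Comparisons against a splat of a small constant should use the immediate
+; compare forms (ceqi, clei_s/u, clti_s/u) rather than materializing the
+; constant in a register.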
+define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: ceqi_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp eq <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: ceqi.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceqi_v16i8
+}
+
+define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: ceqi_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp eq <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: ceqi.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceqi_v8i16
+}
+
+define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ceqi_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp eq <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: ceqi.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceqi_v4i32
+}
+
+define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ceqi_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp eq <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: ceqi.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceqi_v2i64
+}
+
+define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: clei_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: clei_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_s_v16i8
+}
+
+define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: clei_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: clei_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_s_v8i16
+}
+
+define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: clei_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: clei_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_s_v4i32
+}
+
+define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: clei_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: clei_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_s_v2i64
+}
+
+define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: clei_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: clei_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_u_v16i8
+}
+
+define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: clei_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: clei_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_u_v8i16
+}
+
+define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: clei_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: clei_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_u_v4i32
+}
+
+define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: clei_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: clei_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_u_v2i64
+}
+
+define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: clti_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: clti_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_s_v16i8
+}
+
+define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: clti_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: clti_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_s_v8i16
+}
+
+define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: clti_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: clti_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_s_v4i32
+}
+
+define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: clti_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: clti_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_s_v2i64
+}
+
+define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: clti_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: clti_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_u_v16i8
+}
+
+define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: clti_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: clti_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_u_v8i16
+}
+
+define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: clti_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: clti_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_u_v4i32
+}
+
+define void @clti_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: clti_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: clti_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_u_v2i64
+}
+
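+; A select between two vectors under a compared mask is expected to lower to
+; bsel.v or an equivalent bit-move instruction.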
+define void @bsel_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: bsel_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %c
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp sgt <16 x i8> %1, %2
+ ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <16 x i1> %4, <16 x i8> %1, <16 x i8> %3
+ ; bmnz.v performs the same selection as bsel.v, with the mask taken from the
+ ; third operand instead of the destination
+ ; CHECK-DAG: bmnz.v [[R3]], [[R1]], [[R4]]
+ store <16 x i8> %5, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_s_v16i8
+}
+
+define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: bsel_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <8 x i16>* %c
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp sgt <8 x i16> %1, %2
+ ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <8 x i16> %5, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_s_v8i16
+}
+
+define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: bsel_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x i32>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp sgt <4 x i32> %1, %2
+ ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x i32> %5, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_s_v4i32
+}
+
+define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: bsel_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x i64>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp sgt <2 x i64> %1, %2
+ ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x i64> %5, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_s_v2i64
+}
+
+define void @bsel_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: bsel_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %c
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp ugt <16 x i8> %1, %2
+ ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <16 x i1> %4, <16 x i8> %1, <16 x i8> %3
+ ; bmnz.v performs the same selection as bsel.v, with the mask taken from the
+ ; third operand instead of the destination
+ ; CHECK-DAG: bmnz.v [[R3]], [[R1]], [[R4]]
+ store <16 x i8> %5, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_u_v16i8
+}
+
+define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: bsel_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <8 x i16>* %c
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp ugt <8 x i16> %1, %2
+ ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <8 x i16> %5, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_u_v8i16
+}
+
+define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: bsel_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x i32>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp ugt <4 x i32> %1, %2
+ ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x i32> %5, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_u_v4i32
+}
+
+define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: bsel_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x i64>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp ugt <2 x i64> %1, %2
+ ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x i64> %5, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_u_v2i64
+}
+
+define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: bseli_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <16 x i8> %1, %2
+ ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
+ store <16 x i8> %4, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_s_v16i8
+}
+
+define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: bseli_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <8 x i16> %1, %2
+ ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
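+ ; bseli only has a byte-immediate form; a 16-bit splat of 1 is not a
+ ; repeating byte pattern, so the constant is presumably loaded with ldi.h
+ ; and selected with bsel.v instead.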
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <8 x i16> %4, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_s_v8i16
+}
+
+define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: bseli_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <4 x i32> %1, %2
+ ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x i32> %4, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_s_v4i32
+}
+
+define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: bseli_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <2 x i64> %1, %2
+ ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x i64> %4, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_s_v2i64
+}
+
+define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: bseli_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <16 x i8> %1, %2
+ ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
+ store <16 x i8> %4, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_u_v16i8
+}
+
+define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: bseli_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <8 x i16> %1, %2
+ ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <8 x i16> %4, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_u_v8i16
+}
+
+define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: bseli_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <4 x i32> %1, %2
+ ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x i32> %4, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_u_v4i32
+}
+
+define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: bseli_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <2 x i64> %1, %2
+ ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x i64> %4, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_u_v2i64
+}
+
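+; icmp/select pairs that return the greater or lesser of the two operands
+; should fuse into single max/min instructions.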
+define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: max_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: max_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_v16i8
+}
+
+define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: max_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: max_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_v8i16
+}
+
+define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: max_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: max_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_v4i32
+}
+
+define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: max_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: max_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_v2i64
+}
+
+define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: max_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: max_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_v16i8
+}
+
+define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: max_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: max_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_v8i16
+}
+
+define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: max_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: max_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_v4i32
+}
+
+define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: max_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: max_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_v2i64
+}
+
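+; The non-strict predicates (sge/uge) should select the same max instructions,
+; since either operand may be returned when they compare equal.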
+define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: max_s_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sge <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: max_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_eq_v16i8
+}
+
+define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: max_s_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sge <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: max_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_eq_v8i16
+}
+
+define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: max_s_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sge <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: max_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_eq_v4i32
+}
+
+define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: max_s_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sge <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: max_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_eq_v2i64
+}
+
+define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: max_u_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp uge <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: max_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_eq_v16i8
+}
+
+define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: max_u_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp uge <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: max_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_eq_v8i16
+}
+
+define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: max_u_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp uge <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: max_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_eq_v4i32
+}
+
+define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: max_u_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp uge <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: max_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_eq_v2i64
+}
+
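+; Comparisons against a splatted immediate should select the maxi_s/maxi_u
+; immediate forms.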
+define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: maxi_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sgt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: maxi_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_v16i8
+}
+
+define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: maxi_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sgt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: maxi_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_v8i16
+}
+
+define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: maxi_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sgt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: maxi_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_v4i32
+}
+
+define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: maxi_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sgt <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: maxi_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_v2i64
+}
+
+define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: maxi_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ugt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: maxi_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_v16i8
+}
+
+define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: maxi_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ugt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: maxi_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_v8i16
+}
+
+define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: maxi_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ugt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: maxi_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_v4i32
+}
+
+define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: maxi_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ugt <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: maxi_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_v2i64
+}
+
+define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: maxi_s_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: maxi_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_eq_v16i8
+}
+
+define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: maxi_s_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: maxi_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_eq_v8i16
+}
+
+define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: maxi_s_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: maxi_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_eq_v4i32
+}
+
+define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: maxi_s_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sge <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: maxi_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_eq_v2i64
+}
+
+define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: maxi_u_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp uge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: maxi_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_eq_v16i8
+}
+
+define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: maxi_u_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp uge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: maxi_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_eq_v8i16
+}
+
+define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: maxi_u_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp uge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: maxi_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_eq_v4i32
+}
+
+define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: maxi_u_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp uge <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: maxi_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_eq_v2i64
+}
+
+define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: min_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: min_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_v16i8
+}
+
+define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: min_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: min_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_v8i16
+}
+
+define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: min_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: min_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_v4i32
+}
+
+define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: min_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: min_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_v2i64
+}
+
+define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: min_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: min_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_v16i8
+}
+
+define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: min_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: min_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_v8i16
+}
+
+define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: min_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: min_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_v4i32
+}
+
+define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: min_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: min_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_v2i64
+}
+
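+; As with max, the non-strict predicates (sle/ule) should select the same min
+; instructions.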
+define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: min_s_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: min_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_eq_v16i8
+}
+
+define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: min_s_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: min_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_eq_v8i16
+}
+
+define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: min_s_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: min_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_eq_v4i32
+}
+
+define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: min_s_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: min_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_eq_v2i64
+}
+
+define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: min_u_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: min_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_eq_v16i8
+}
+
+define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: min_u_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: min_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_eq_v8i16
+}
+
+define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: min_u_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: min_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_eq_v4i32
+}
+
+define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: min_u_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: min_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_eq_v2i64
+}
+
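+; Note: the splat-of-1 operand in the tests below is expected to fold into the
+; 5-bit immediate of the I5 instruction format, which is why mini_s/mini_u are
+; checked for rather than a ldi + min_s/min_u pair.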
+define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: mini_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: mini_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_v16i8
+}
+
+define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: mini_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: mini_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_v8i16
+}
+
+define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: mini_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: mini_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_v4i32
+}
+
+define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: mini_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: mini_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_v2i64
+}
+
+define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: mini_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: mini_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_v16i8
+}
+
+define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: mini_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: mini_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_v8i16
+}
+
+define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: mini_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: mini_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_v4i32
+}
+
+define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: mini_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: mini_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_v2i64
+}
+
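+; The _eq variants below use sle/ule rather than slt/ult. Selecting between x
+; and a splat of 1 yields min(x, 1) either way, so the same mini_s/mini_u
+; instructions should still be selected.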
+define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: mini_s_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: mini_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_eq_v16i8
+}
+
+define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: mini_s_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: mini_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_eq_v8i16
+}
+
+define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: mini_s_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: mini_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_eq_v4i32
+}
+
+define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: mini_s_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: mini_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_eq_v2i64
+}
+
+define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: mini_u_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: mini_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_eq_v16i8
+}
+
+define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: mini_u_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: mini_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_eq_v8i16
+}
+
+define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: mini_u_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: mini_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_eq_v4i32
+}
+
+define void @mini_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: mini_u_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: mini_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_eq_v2i64
+}
diff --git a/test/CodeGen/Mips/msa/compare_float.ll b/test/CodeGen/Mips/msa/compare_float.ll
new file mode 100644
index 0000000..2fc61f8
--- /dev/null
+++ b/test/CodeGen/Mips/msa/compare_float.ll
@@ -0,0 +1,663 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind
+declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind
+declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
+declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
+
+define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: false_v4f32:
+
+ %1 = load <4 x float>* %a
+ %2 = load <4 x float>* %b
+ %3 = fcmp false <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ store <4 x i32> %4, <4 x i32>* %c
+ ret void
+
+ ; (setcc $a, $b, SETFALSE) is always folded, so we won't get fcaf:
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+ ; CHECK: .size false_v4f32
+}
+
+define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: false_v2f64:
+
+ %1 = load <2 x double>* %a
+ %2 = load <2 x double>* %b
+ %3 = fcmp false <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ store <2 x i64> %4, <2 x i64>* %c
+ ret void
+
+ ; FIXME: This code is correct, but poor. Ideally it would be similar to
+ ; the code in @false_v4f32
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0
+ ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63
+ ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+ ; CHECK: .size false_v2f64
+}
+
+define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: oeq_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp oeq <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size oeq_v4f32
+}
+
+define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: oeq_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp oeq <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size oeq_v2f64
+}
+
+define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: oge_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp oge <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size oge_v4f32
+}
+
+define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: oge_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp oge <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size oge_v2f64
+}
+
+define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ogt_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ogt <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ogt_v4f32
+}
+
+define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ogt_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ogt <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ogt_v2f64
+}
+
+define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ole_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ole <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ole_v4f32
+}
+
+define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ole_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ole <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ole_v2f64
+}
+
+define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: olt_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp olt <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size olt_v4f32
+}
+
+define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: olt_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp olt <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size olt_v2f64
+}
+
+define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: one_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp one <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcne.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size one_v4f32
+}
+
+define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: one_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp one <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcne.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size one_v2f64
+}
+
+define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ord_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ord <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcor.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ord_v4f32
+}
+
+define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ord_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ord <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcor.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ord_v2f64
+}
+
+define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ueq_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ueq <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcueq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ueq_v4f32
+}
+
+define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ueq_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ueq <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcueq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ueq_v2f64
+}
+
+define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: uge_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp uge <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size uge_v4f32
+}
+
+define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: uge_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp uge <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size uge_v2f64
+}
+
+define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ugt_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ugt <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ugt_v4f32
+}
+
+define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ugt_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ugt <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ugt_v2f64
+}
+
+define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ule_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ule <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ule_v4f32
+}
+
+define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ule_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ule <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ule_v2f64
+}
+
+define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ult_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ult <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ult_v4f32
+}
+
+define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ult_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ult <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ult_v2f64
+}
+
+define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: uno_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp uno <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcun.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size uno_v4f32
+}
+
+define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: uno_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp uno <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcun.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size uno_v2f64
+}
+
+define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: true_v4f32:
+
+ %1 = load <4 x float>* %a
+ %2 = load <4 x float>* %b
+ %3 = fcmp true <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ store <4 x i32> %4, <4 x i32>* %c
+ ret void
+
+ ; (setcc $a, $b, SETTRUE) is always folded, so no comparison is emitted:
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], -1
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+ ; CHECK: .size true_v4f32
+}
+
+define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: true_v2f64:
+
+ %1 = load <2 x double>* %a
+ %2 = load <2 x double>* %b
+ %3 = fcmp true <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ store <2 x i64> %4, <2 x i64>* %c
+ ret void
+
+ ; FIXME: This code is correct, but poor. Ideally it would be similar to
+ ; the code in @true_v4f32
+ ; CHECK-DAG: ldi.d [[R1:\$w[0-9]+]], 1
+ ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63
+ ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+ ; CHECK: .size true_v2f64
+}
+
+define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
+ <4 x float>* %c) nounwind {
+ ; CHECK: bsel_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x float>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = fcmp ogt <4 x float> %1, %2
+ ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <4 x i1> %4, <4 x float> %1, <4 x float> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x float> %5, <4 x float>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v4f32
+}
+
+define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
+ <2 x double>* %c) nounwind {
+ ; CHECK: bsel_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x double>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = fcmp ogt <2 x double> %1, %2
+ ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <2 x i1> %4, <2 x double> %1, <2 x double> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x double> %5, <2 x double>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v2f64
+}
+
+define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
+ <4 x float>* %c) nounwind {
+ ; CHECK: bseli_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ogt <4 x float> %1, %2
+ ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <4 x i1> %3, <4 x float> %1, <4 x float> zeroinitializer
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3:\$w[0-9]+]]
+ store <4 x float> %4, <4 x float>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_v4f32
+}
+
+define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
+ <2 x double>* %c) nounwind {
+ ; CHECK: bseli_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ogt <2 x double> %1, %2
+ ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <2 x i1> %3, <2 x double> %1, <2 x double> zeroinitializer
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3:\$w[0-9]+]]
+ store <2 x double> %4, <2 x double>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_v2f64
+}
+
+define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: max_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2)
+ ; CHECK-DAG: fmax.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_v4f32
+}
+
+define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: max_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2)
+ ; CHECK-DAG: fmax.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_v2f64
+}
+
+define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: min_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2)
+ ; CHECK-DAG: fmin.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_v4f32
+}
+
+define void @min_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: min_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2)
+ ; CHECK-DAG: fmin.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_v2f64
+}
diff --git a/test/CodeGen/Mips/msa/elm_copy.ll b/test/CodeGen/Mips/msa/elm_copy.ll
new file mode 100644
index 0000000..ed3e52c
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_copy.ll
@@ -0,0 +1,162 @@
+; Test the MSA intrinsics that are encoded with the ELM instruction format and
+; are element extraction operations.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
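+; Note: these RUN lines target MIPS32, so the 64-bit element tests below expect
+; the extraction to be expanded into two copy_s.w operations (one per 32-bit
+; half). This applies to copy_u.d as well: since both halves are copied whole,
+; the signedness of the extension does not matter.
+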
+@llvm_mips_copy_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_copy_s_b_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_copy_s_b_ARG1
+ %1 = tail call i32 @llvm.mips.copy.s.b(<16 x i8> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_s_b_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_copy_s_b_test:
+; CHECK: ld.b
+; CHECK: copy_s.b
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_s_b_test
+;
+@llvm_mips_copy_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_copy_s_h_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_copy_s_h_ARG1
+ %1 = tail call i32 @llvm.mips.copy.s.h(<8 x i16> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_s_h_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_copy_s_h_test:
+; CHECK: ld.h
+; CHECK: copy_s.h
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_s_h_test
+;
+@llvm_mips_copy_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_copy_s_w_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_copy_s_w_ARG1
+ %1 = tail call i32 @llvm.mips.copy.s.w(<4 x i32> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_s_w_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_copy_s_w_test:
+; CHECK: ld.w
+; CHECK: copy_s.w
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_s_w_test
+;
+@llvm_mips_copy_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_copy_s_d_RES = global i64 0, align 16
+
+define void @llvm_mips_copy_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_copy_s_d_ARG1
+ %1 = tail call i64 @llvm.mips.copy.s.d(<2 x i64> %0, i32 1)
+ store i64 %1, i64* @llvm_mips_copy_s_d_RES
+ ret void
+}
+
+declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_copy_s_d_test:
+; CHECK: ld.w
+; CHECK: copy_s.w
+; CHECK: copy_s.w
+; CHECK: sw
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_s_d_test
+;
+@llvm_mips_copy_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_copy_u_b_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_copy_u_b_ARG1
+ %1 = tail call i32 @llvm.mips.copy.u.b(<16 x i8> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_u_b_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_copy_u_b_test:
+; CHECK: ld.b
+; CHECK: copy_u.b
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_u_b_test
+;
+@llvm_mips_copy_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_copy_u_h_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_copy_u_h_ARG1
+ %1 = tail call i32 @llvm.mips.copy.u.h(<8 x i16> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_u_h_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_copy_u_h_test:
+; CHECK: ld.h
+; CHECK: copy_u.h
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_u_h_test
+;
+@llvm_mips_copy_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_copy_u_w_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_copy_u_w_ARG1
+ %1 = tail call i32 @llvm.mips.copy.u.w(<4 x i32> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_u_w_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_copy_u_w_test:
+; CHECK: ld.w
+; CHECK: copy_u.w
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_u_w_test
+;
+@llvm_mips_copy_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_copy_u_d_RES = global i64 0, align 16
+
+define void @llvm_mips_copy_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_copy_u_d_ARG1
+ %1 = tail call i64 @llvm.mips.copy.u.d(<2 x i64> %0, i32 1)
+ store i64 %1, i64* @llvm_mips_copy_u_d_RES
+ ret void
+}
+
+declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_copy_u_d_test:
+; CHECK: ld.w
+; CHECK: copy_s.w
+; CHECK: copy_s.w
+; CHECK: sw
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/elm_cxcmsa.ll b/test/CodeGen/Mips/msa/elm_cxcmsa.ll
new file mode 100644
index 0000000..8d6b0ee
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_cxcmsa.ll
@@ -0,0 +1,168 @@
+; Test the MSA ctcmsa and cfcmsa intrinsics (which are encoded with the ELM
+; instruction format).
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
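+; The register numbers below correspond to the MSA control registers, matching
+; the ir/csr/access/... test names: $0 MSAIR, $1 MSACSR, $2 MSAAccess,
+; $3 MSASave, $4 MSAModify, $5 MSARequest, $6 MSAMap, $7 MSAUnmap.
+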
+define i32 @msa_ir_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 0)
+ ret i32 %0
+}
+
+; CHECK: msa_ir_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $0
+; CHECK: .size msa_ir_cfcmsa_test
+;
+define i32 @msa_csr_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 1)
+ ret i32 %0
+}
+
+; CHECK: msa_csr_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $1
+; CHECK: .size msa_csr_cfcmsa_test
+;
+define i32 @msa_access_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 2)
+ ret i32 %0
+}
+
+; CHECK: msa_access_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $2
+; CHECK: .size msa_access_cfcmsa_test
+;
+define i32 @msa_save_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 3)
+ ret i32 %0
+}
+
+; CHECK: msa_save_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $3
+; CHECK: .size msa_save_cfcmsa_test
+;
+define i32 @msa_modify_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 4)
+ ret i32 %0
+}
+
+; CHECK: msa_modify_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $4
+; CHECK: .size msa_modify_cfcmsa_test
+;
+define i32 @msa_request_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 5)
+ ret i32 %0
+}
+
+; CHECK: msa_request_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $5
+; CHECK: .size msa_request_cfcmsa_test
+;
+define i32 @msa_map_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 6)
+ ret i32 %0
+}
+
+; CHECK: msa_map_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $6
+; CHECK: .size msa_map_cfcmsa_test
+;
+define i32 @msa_unmap_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 7)
+ ret i32 %0
+}
+
+; CHECK: msa_unmap_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $7
+; CHECK: .size msa_unmap_cfcmsa_test
+;
+define void @msa_ir_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 0, i32 1)
+ ret void
+}
+
+; CHECK: msa_ir_ctcmsa_test:
+; CHECK: ctcmsa $0
+; CHECK: .size msa_ir_ctcmsa_test
+;
+define void @msa_csr_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 1, i32 1)
+ ret void
+}
+
+; CHECK: msa_csr_ctcmsa_test:
+; CHECK: ctcmsa $1
+; CHECK: .size msa_csr_ctcmsa_test
+;
+define void @msa_access_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 2, i32 1)
+ ret void
+}
+
+; CHECK: msa_access_ctcmsa_test:
+; CHECK: ctcmsa $2
+; CHECK: .size msa_access_ctcmsa_test
+;
+define void @msa_save_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 3, i32 1)
+ ret void
+}
+
+; CHECK: msa_save_ctcmsa_test:
+; CHECK: ctcmsa $3
+; CHECK: .size msa_save_ctcmsa_test
+;
+define void @msa_modify_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 4, i32 1)
+ ret void
+}
+
+; CHECK: msa_modify_ctcmsa_test:
+; CHECK: ctcmsa $4
+; CHECK: .size msa_modify_ctcmsa_test
+;
+define void @msa_request_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 5, i32 1)
+ ret void
+}
+
+; CHECK: msa_request_ctcmsa_test:
+; CHECK: ctcmsa $5
+; CHECK: .size msa_request_ctcmsa_test
+;
+define void @msa_map_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 6, i32 1)
+ ret void
+}
+
+; CHECK: msa_map_ctcmsa_test:
+; CHECK: ctcmsa $6
+; CHECK: .size msa_map_ctcmsa_test
+;
+define void @msa_unmap_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 7, i32 1)
+ ret void
+}
+
+; CHECK: msa_unmap_ctcmsa_test:
+; CHECK: ctcmsa $7
+; CHECK: .size msa_unmap_ctcmsa_test
+;
+declare i32 @llvm.mips.cfcmsa(i32) nounwind
+declare void @llvm.mips.ctcmsa(i32, i32) nounwind
diff --git a/test/CodeGen/Mips/msa/elm_insv.ll b/test/CodeGen/Mips/msa/elm_insv.ll
new file mode 100644
index 0000000..fa7ceaf
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_insv.ll
@@ -0,0 +1,192 @@
+; Test the MSA element insertion intrinsics that are encoded with the ELM
+; instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
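+; Note: insert.d is unavailable on MIPS32, so llvm_mips_insert_d_test below
+; expects the i64 value to arrive in two GPRs and to be inserted with two
+; insert.w operations.
+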
+@llvm_mips_insert_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_insert_b_ARG3 = global i32 27, align 16
+@llvm_mips_insert_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_insert_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_insert_b_ARG1
+ %1 = load i32* @llvm_mips_insert_b_ARG3
+ %2 = tail call <16 x i8> @llvm.mips.insert.b(<16 x i8> %0, i32 1, i32 %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_insert_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.insert.b(<16 x i8>, i32, i32) nounwind
+
+; CHECK: llvm_mips_insert_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0(
+; CHECK-DAG: insert.b [[R2]][1], [[R1]]
+; CHECK-DAG: st.b [[R2]], 0(
+; CHECK: .size llvm_mips_insert_b_test
+;
+@llvm_mips_insert_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_insert_h_ARG3 = global i32 27, align 16
+@llvm_mips_insert_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_insert_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_insert_h_ARG1
+ %1 = load i32* @llvm_mips_insert_h_ARG3
+ %2 = tail call <8 x i16> @llvm.mips.insert.h(<8 x i16> %0, i32 1, i32 %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_insert_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.insert.h(<8 x i16>, i32, i32) nounwind
+
+; CHECK: llvm_mips_insert_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0(
+; CHECK-DAG: insert.h [[R2]][1], [[R1]]
+; CHECK-DAG: st.h [[R2]], 0(
+; CHECK: .size llvm_mips_insert_h_test
+;
+@llvm_mips_insert_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_insert_w_ARG3 = global i32 27, align 16
+@llvm_mips_insert_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_insert_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_insert_w_ARG1
+ %1 = load i32* @llvm_mips_insert_w_ARG3
+ %2 = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %0, i32 1, i32 %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_insert_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind
+
+; CHECK: llvm_mips_insert_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0(
+; CHECK-DAG: insert.w [[R2]][1], [[R1]]
+; CHECK-DAG: st.w [[R2]], 0(
+; CHECK: .size llvm_mips_insert_w_test
+;
+@llvm_mips_insert_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_insert_d_ARG3 = global i64 27, align 16
+@llvm_mips_insert_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_insert_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_insert_d_ARG1
+ %1 = load i64* @llvm_mips_insert_d_ARG3
+ %2 = tail call <2 x i64> @llvm.mips.insert.d(<2 x i64> %0, i32 1, i64 %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_insert_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.insert.d(<2 x i64>, i32, i64) nounwind
+
+; CHECK: llvm_mips_insert_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], 4(
+; CHECK-DAG: ld.w [[R3:\$w[0-9]+]],
+; CHECK-DAG: insert.w [[R3]][2], [[R1]]
+; CHECK-DAG: insert.w [[R3]][3], [[R2]]
+; CHECK-DAG: st.w [[R3]],
+; CHECK: .size llvm_mips_insert_d_test
+;
+@llvm_mips_insve_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_insve_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_insve_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_insve_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_insve_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_insve_b_ARG3
+ %2 = tail call <16 x i8> @llvm.mips.insve.b(<16 x i8> %0, i32 1, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_insve_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_insve_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_b_ARG3)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: insve.b [[R3]][1], [[R4]][0]
+; CHECK-DAG: st.b [[R3]],
+; CHECK: .size llvm_mips_insve_b_test
+;
+@llvm_mips_insve_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_insve_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_insve_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_insve_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_insve_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_insve_h_ARG3
+ %2 = tail call <8 x i16> @llvm.mips.insve.h(<8 x i16> %0, i32 1, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_insve_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.insve.h(<8 x i16>, i32, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_insve_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_h_ARG3)(
+; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: insve.h [[R3]][1], [[R4]][0]
+; CHECK-DAG: st.h [[R3]],
+; CHECK: .size llvm_mips_insve_h_test
+;
+@llvm_mips_insve_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_insve_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_insve_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_insve_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_insve_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_insve_w_ARG3
+ %2 = tail call <4 x i32> @llvm.mips.insve.w(<4 x i32> %0, i32 1, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_insve_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.insve.w(<4 x i32>, i32, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_insve_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_w_ARG3)(
+; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: insve.w [[R3]][1], [[R4]][0]
+; CHECK-DAG: st.w [[R3]],
+; CHECK: .size llvm_mips_insve_w_test
+;
+@llvm_mips_insve_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_insve_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_insve_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_insve_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_insve_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_insve_d_ARG3
+ %2 = tail call <2 x i64> @llvm.mips.insve.d(<2 x i64> %0, i32 1, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_insve_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.insve.d(<2 x i64>, i32, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_insve_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_d_ARG3)(
+; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: insve.d [[R3]][1], [[R4]][0]
+; CHECK-DAG: st.d [[R3]],
+; CHECK: .size llvm_mips_insve_d_test
+;
diff --git a/test/CodeGen/Mips/msa/elm_move.ll b/test/CodeGen/Mips/msa/elm_move.ll
new file mode 100644
index 0000000..98c06c7
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_move.ll
@@ -0,0 +1,25 @@
+; Test the MSA move intrinsics (which are encoded with the ELM instruction
+; format).
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_move_vb_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_move_vb_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_move_vb_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_move_vb_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.move.v(<16 x i8> %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_move_vb_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.move.v(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_move_vb_test:
+; CHECK: ld.b
+; CHECK: move.v
+; CHECK: st.b
+; CHECK: .size llvm_mips_move_vb_test
+;
diff --git a/test/CodeGen/Mips/msa/elm_shift_slide.ll b/test/CodeGen/Mips/msa/elm_shift_slide.ll
new file mode 100644
index 0000000..39d670d
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_shift_slide.ll
@@ -0,0 +1,158 @@
+; Test the MSA intrinsics that are encoded with the ELM instruction format and
+; are either shifts or slides.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
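+; sldi.df slides the elements of a vector by an immediate element count, and
+; splati.df replicates a selected element across the destination; each test
+; only checks that the load/op/store sequence selects the matching instruction.
+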
+@llvm_mips_sldi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sldi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sldi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sldi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, i32 1)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_sldi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_b_test:
+; CHECK: ld.b
+; CHECK: sldi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sldi_b_test
+;
+@llvm_mips_sldi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sldi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sldi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sldi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, i32 1)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_sldi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_h_test:
+; CHECK: ld.h
+; CHECK: sldi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sldi_h_test
+;
+@llvm_mips_sldi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sldi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sldi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sldi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, i32 1)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_sldi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_w_test:
+; CHECK: ld.w
+; CHECK: sldi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sldi_w_test
+;
+@llvm_mips_sldi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sldi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sldi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sldi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, i32 1)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_sldi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_d_test:
+; CHECK: ld.d
+; CHECK: sldi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sldi_d_test
+;
+@llvm_mips_splati_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_splati_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_splati_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_splati_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.splati.b(<16 x i8> %0, i32 1)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_splati_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_splati_b_test:
+; CHECK: ld.b
+; CHECK: splati.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_splati_b_test
+;
+@llvm_mips_splati_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_splati_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_splati_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_splati_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.splati.h(<8 x i16> %0, i32 1)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_splati_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_splati_h_test:
+; CHECK: ld.h
+; CHECK: splati.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_splati_h_test
+;
+@llvm_mips_splati_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_splati_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_splati_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_splati_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_splati_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_splati_w_test:
+; CHECK: ld.w
+; CHECK: splati.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_splati_w_test
+;
+@llvm_mips_splati_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_splati_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_splati_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_splati_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.splati.d(<2 x i64> %0, i32 1)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_splati_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.splati.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_splati_d_test:
+; CHECK: ld.d
+; CHECK: splati.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_splati_d_test
+;
diff --git a/test/CodeGen/Mips/msa/endian.ll b/test/CodeGen/Mips/msa/endian.ll
new file mode 100644
index 0000000..44d1925
--- /dev/null
+++ b/test/CodeGen/Mips/msa/endian.ll
@@ -0,0 +1,107 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=BIGENDIAN %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=LITENDIAN %s
+
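+; Check that constant vectors are laid out correctly for each endianness. For
+; v16i8 the emitted byte order is identical on both targets; for v2i64 on
+; MIPS32 each constant is emitted as two .4byte words whose order flips with
+; endianness, as the checks in @const_v2i64 show.
+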
+@v16i8 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+@v8i16 = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
+@v4i32 = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+@v2i64 = global <2 x i64> <i64 0, i64 0>
+
+define void @const_v16i8() nounwind {
+ ; LITENDIAN: .byte 0
+ ; LITENDIAN: .byte 1
+ ; LITENDIAN: .byte 2
+ ; LITENDIAN: .byte 3
+ ; LITENDIAN: .byte 4
+ ; LITENDIAN: .byte 5
+ ; LITENDIAN: .byte 6
+ ; LITENDIAN: .byte 7
+ ; LITENDIAN: .byte 8
+ ; LITENDIAN: .byte 9
+ ; LITENDIAN: .byte 10
+ ; LITENDIAN: .byte 11
+ ; LITENDIAN: .byte 12
+ ; LITENDIAN: .byte 13
+ ; LITENDIAN: .byte 14
+ ; LITENDIAN: .byte 15
+ ; LITENDIAN: const_v16i8:
+ ; BIGENDIAN: .byte 0
+ ; BIGENDIAN: .byte 1
+ ; BIGENDIAN: .byte 2
+ ; BIGENDIAN: .byte 3
+ ; BIGENDIAN: .byte 4
+ ; BIGENDIAN: .byte 5
+ ; BIGENDIAN: .byte 6
+ ; BIGENDIAN: .byte 7
+ ; BIGENDIAN: .byte 8
+ ; BIGENDIAN: .byte 9
+ ; BIGENDIAN: .byte 10
+ ; BIGENDIAN: .byte 11
+ ; BIGENDIAN: .byte 12
+ ; BIGENDIAN: .byte 13
+ ; BIGENDIAN: .byte 14
+ ; BIGENDIAN: .byte 15
+ ; BIGENDIAN: const_v16i8:
+
+ store volatile <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8>*@v16i8
+
+ ret void
+}
+
+define void @const_v8i16() nounwind {
+ ; LITENDIAN: .2byte 0
+ ; LITENDIAN: .2byte 1
+ ; LITENDIAN: .2byte 2
+ ; LITENDIAN: .2byte 3
+ ; LITENDIAN: .2byte 4
+ ; LITENDIAN: .2byte 5
+ ; LITENDIAN: .2byte 6
+ ; LITENDIAN: .2byte 7
+ ; LITENDIAN: const_v8i16:
+ ; BIGENDIAN: .2byte 0
+ ; BIGENDIAN: .2byte 1
+ ; BIGENDIAN: .2byte 2
+ ; BIGENDIAN: .2byte 3
+ ; BIGENDIAN: .2byte 4
+ ; BIGENDIAN: .2byte 5
+ ; BIGENDIAN: .2byte 6
+ ; BIGENDIAN: .2byte 7
+ ; BIGENDIAN: const_v8i16:
+
+ store volatile <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, <8 x i16>*@v8i16
+
+ ret void
+}
+
+define void @const_v4i32() nounwind {
+ ; LITENDIAN: .4byte 0
+ ; LITENDIAN: .4byte 1
+ ; LITENDIAN: .4byte 2
+ ; LITENDIAN: .4byte 3
+ ; LITENDIAN: const_v4i32:
+ ; BIGENDIAN: .4byte 0
+ ; BIGENDIAN: .4byte 1
+ ; BIGENDIAN: .4byte 2
+ ; BIGENDIAN: .4byte 3
+ ; BIGENDIAN: const_v4i32:
+
+ store volatile <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>*@v4i32
+
+ ret void
+}
+
+define void @const_v2i64() nounwind {
+ ; LITENDIAN: .4byte 1
+ ; LITENDIAN: .4byte 0
+ ; LITENDIAN: .4byte 2
+ ; LITENDIAN: .4byte 0
+ ; LITENDIAN: const_v2i64:
+ ; BIGENDIAN: .4byte 0
+ ; BIGENDIAN: .4byte 1
+ ; BIGENDIAN: .4byte 0
+ ; BIGENDIAN: .4byte 2
+ ; BIGENDIAN: const_v2i64:
+
+ store volatile <2 x i64> <i64 1, i64 2>, <2 x i64>*@v2i64
+
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/frameindex.ll b/test/CodeGen/Mips/msa/frameindex.ll
new file mode 100644
index 0000000..3088e1b
--- /dev/null
+++ b/test/CodeGen/Mips/msa/frameindex.ll
@@ -0,0 +1,85 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32-AE -check-prefix=MIPS32-BE %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32-AE -check-prefix=MIPS32-LE %s
+
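+; MSA loads/stores carry a signed 10-bit scaled offset (a reach of 511 bytes
+; upward for ld.b/st.b), so small offsets should fold into the instruction,
+; offsets from 512 up to 32767 should go through an addiu'd base register, and
+; anything larger should build the base with ori/addu, as the tests below check.
+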
+define void @loadstore_v16i8_near() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_near:
+
+ %1 = alloca <16 x i8>
+ %2 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0($sp)
+ store volatile <16 x i8> %2, <16 x i8>* %1
+ ; MIPS32-AE: st.b [[R1]], 0($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_near
+}
+
+define void @loadstore_v16i8_just_under_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_just_under_simm10:
+
+ %1 = alloca <16 x i8>
+ %2 = alloca [496 x i8] ; Push the frame right up to 512 bytes
+
+ %3 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 496($sp)
+ store volatile <16 x i8> %3, <16 x i8>* %1
+ ; MIPS32-AE: st.b [[R1]], 496($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_just_under_simm10
+}
+
+define void @loadstore_v16i8_just_over_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_just_over_simm10:
+
+ %1 = alloca <16 x i8>
+ %2 = alloca [497 x i8] ; Push the frame just over 512 bytes
+
+ %3 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 512
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <16 x i8> %3, <16 x i8>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 512
+ ; MIPS32-AE: st.b [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_just_over_simm10
+}
+
+define void @loadstore_v16i8_just_under_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_just_under_simm16:
+
+ %1 = alloca <16 x i8>
+ %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
+
+ %3 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <16 x i8> %3, <16 x i8>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.b [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_just_under_simm16
+}
+
+define void @loadstore_v16i8_just_over_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_just_over_simm16:
+
+ %1 = alloca <16 x i8>
+ %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
+
+ %3 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <16 x i8> %3, <16 x i8>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.b [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_just_over_simm16
+}
diff --git a/test/CodeGen/Mips/msa/i10.ll b/test/CodeGen/Mips/msa/i10.ll
new file mode 100644
index 0000000..c5a9617
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i10.ll
@@ -0,0 +1,89 @@
+; Test the MSA intrinsics that are encoded with the I10 instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
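+; A rough C equivalent of each test below (assuming clang's <msa.h> builtins;
+; not part of this test) is:
+;   int test(v16i8 v) { return __builtin_msa_bnz_b(v) ? 3 : 2; }
+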
+@llvm_mips_bnz_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+
+define i32 @llvm_mips_bnz_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bnz_b_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.b(<16 x i8> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_bnz_b_test:
+; CHECK-DAG: ld.b [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.b [[R0]]
+; CHECK: .size llvm_mips_bnz_b_test
+
+@llvm_mips_bnz_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+
+define i32 @llvm_mips_bnz_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bnz_h_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.h(<8 x i16> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_bnz_h_test:
+; CHECK-DAG: ld.h [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.h [[R0]]
+; CHECK: .size llvm_mips_bnz_h_test
+
+@llvm_mips_bnz_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+
+define i32 @llvm_mips_bnz_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bnz_w_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.w(<4 x i32> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_bnz_w_test:
+; CHECK-DAG: ld.w [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.w [[R0]]
+; CHECK: .size llvm_mips_bnz_w_test
+
+@llvm_mips_bnz_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+
+define i32 @llvm_mips_bnz_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bnz_d_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.d(<2 x i64> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_bnz_d_test:
+; CHECK-DAG: ld.d [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.d [[R0]]
+; CHECK: .size llvm_mips_bnz_d_test
+
diff --git a/test/CodeGen/Mips/msa/i5-a.ll b/test/CodeGen/Mips/msa/i5-a.ll
new file mode 100644
index 0000000..0b50720
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-a.ll
@@ -0,0 +1,82 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers the ones beginning with 'a'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
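+; The I5 format encodes a 5-bit immediate, so 14 is directly representable.
+; A rough C equivalent (assuming clang's <msa.h> builtins) is:
+;   v16i8 test(v16i8 v) { return __builtin_msa_addvi_b(v, 14); }
+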
+@llvm_mips_addvi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_addvi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_addvi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_addvi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_addvi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_addvi_b_test:
+; CHECK: ld.b
+; CHECK: addvi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_addvi_b_test
+;
+@llvm_mips_addvi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_addvi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_addvi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_addvi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_addvi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_addvi_h_test:
+; CHECK: ld.h
+; CHECK: addvi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_addvi_h_test
+;
+@llvm_mips_addvi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_addvi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_addvi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_addvi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_addvi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_addvi_w_test:
+; CHECK: ld.w
+; CHECK: addvi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_addvi_w_test
+;
+@llvm_mips_addvi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_addvi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_addvi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_addvi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_addvi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_addvi_d_test:
+; CHECK: ld.d
+; CHECK: addvi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_addvi_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5-b.ll b/test/CodeGen/Mips/msa/i5-b.ll
new file mode 100644
index 0000000..da6be66
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-b.ll
@@ -0,0 +1,439 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers the ones beginning with 'b'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
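+; Note that binsli/binsri read and write their first register operand (the
+; insert destination is also a source), so the CHECK lines below reuse the
+; same register for the result.
+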
+@llvm_mips_bclri_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bclri_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bclri_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bclri_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_bclri_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bclri_b_test:
+; CHECK: ld.b
+; bclri.b with immediate 7 clears bit 7, which is equivalent to andi.b with
+; immediate 127, so either instruction is acceptable here.
+; CHECK: andi.b {{\$w[0-9]}}, {{\$w[0-9]}}, 127
+; CHECK: st.b
+; CHECK: .size llvm_mips_bclri_b_test
+;
+@llvm_mips_bclri_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bclri_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bclri_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bclri_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_bclri_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_bclri_h_test:
+; CHECK: ld.h
+; CHECK: bclri.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bclri_h_test
+;
+@llvm_mips_bclri_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bclri_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bclri_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bclri_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_bclri_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_bclri_w_test:
+; CHECK: ld.w
+; CHECK: bclri.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bclri_w_test
+;
+@llvm_mips_bclri_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bclri_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bclri_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bclri_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_bclri_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_bclri_d_test:
+; CHECK: ld.d
+; CHECK: bclri.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bclri_d_test
+;
+@llvm_mips_binsli_b_ARG1 = global <16 x i8> zeroinitializer, align 16
+@llvm_mips_binsli_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsli_b_RES = global <16 x i8> zeroinitializer, align 16
+
+define void @llvm_mips_binsli_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_binsli_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_binsli_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 7)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_binsli_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsli_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsli.b [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_b_RES)(
+; CHECK-DAG: st.b [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsli_b_test
+
+@llvm_mips_binsli_h_ARG1 = global <8 x i16> zeroinitializer, align 16
+@llvm_mips_binsli_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsli_h_RES = global <8 x i16> zeroinitializer, align 16
+
+define void @llvm_mips_binsli_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_binsli_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_binsli_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 7)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_binsli_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_binsli_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsli_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_h_ARG2)(
+; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsli.h [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_h_RES)(
+; CHECK-DAG: st.h [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsli_h_test
+
+@llvm_mips_binsli_w_ARG1 = global <4 x i32> zeroinitializer, align 16
+@llvm_mips_binsli_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsli_w_RES = global <4 x i32> zeroinitializer, align 16
+
+define void @llvm_mips_binsli_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_binsli_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_binsli_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 7)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_binsli_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_binsli_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsli_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_w_ARG2)(
+; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsli.w [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_w_RES)(
+; CHECK-DAG: st.w [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsli_w_test
+
+@llvm_mips_binsli_d_ARG1 = global <2 x i64> zeroinitializer, align 16
+@llvm_mips_binsli_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsli_d_RES = global <2 x i64> zeroinitializer, align 16
+
+define void @llvm_mips_binsli_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_binsli_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_binsli_d_ARG2
+ ; TODO: We use a particularly wide mask here to work around a legalization
+ ; issue. If the mask doesn't fit within a 10-bit immediate, it gets
+ ; legalized into a constant pool. We should add a test to cover the
+ ; other cases once they correctly select binsli.d.
+ %2 = tail call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %0, <2 x i64> %1, i32 61)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_binsli_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_binsli_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsli_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_d_ARG2)(
+; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsli.d [[R3]], [[R4]], 61
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_d_RES)(
+; CHECK-DAG: st.d [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsli_d_test
+
+@llvm_mips_binsri_b_ARG1 = global <16 x i8> zeroinitializer, align 16
+@llvm_mips_binsri_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsri_b_RES = global <16 x i8> zeroinitializer, align 16
+
+define void @llvm_mips_binsri_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_binsri_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_binsri_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 7)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_binsri_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsri_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsri.b [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_b_RES)(
+; CHECK-DAG: st.b [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsri_b_test
+
+@llvm_mips_binsri_h_ARG1 = global <8 x i16> zeroinitializer, align 16
+@llvm_mips_binsri_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsri_h_RES = global <8 x i16> zeroinitializer, align 16
+
+define void @llvm_mips_binsri_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_binsri_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_binsri_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 7)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_binsri_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_binsri_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsri_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_h_ARG2)(
+; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsri.h [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_h_RES)(
+; CHECK-DAG: st.h [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsri_h_test
+
+@llvm_mips_binsri_w_ARG1 = global <4 x i32> zeroinitializer, align 16
+@llvm_mips_binsri_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsri_w_RES = global <4 x i32> zeroinitializer, align 16
+
+define void @llvm_mips_binsri_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_binsri_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_binsri_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 7)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_binsri_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_binsri_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsri_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_w_ARG2)(
+; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsri.w [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_w_RES)(
+; CHECK-DAG: st.w [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsri_w_test
+
+@llvm_mips_binsri_d_ARG1 = global <2 x i64> zeroinitializer, align 16
+@llvm_mips_binsri_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsri_d_RES = global <2 x i64> zeroinitializer, align 16
+
+define void @llvm_mips_binsri_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_binsri_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_binsri_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 7)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_binsri_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_binsri_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsri_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_d_ARG2)(
+; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsri.d [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_d_RES)(
+; CHECK-DAG: st.d [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsri_d_test
+
+@llvm_mips_bnegi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bnegi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bnegi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bnegi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_bnegi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bnegi_b_test:
+; CHECK: ld.b
+; CHECK: bnegi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bnegi_b_test
+;
+@llvm_mips_bnegi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bnegi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bnegi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bnegi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_bnegi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_bnegi_h_test:
+; CHECK: ld.h
+; CHECK: bnegi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bnegi_h_test
+;
+@llvm_mips_bnegi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bnegi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bnegi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bnegi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_bnegi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_bnegi_w_test:
+; CHECK: ld.w
+; CHECK: bnegi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bnegi_w_test
+;
+@llvm_mips_bnegi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bnegi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bnegi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bnegi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_bnegi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_bnegi_d_test:
+; CHECK: ld.d
+; CHECK: bnegi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bnegi_d_test
+;
+@llvm_mips_bseti_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bseti_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bseti_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bseti_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_bseti_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bseti_b_test:
+; CHECK: ld.b
+; CHECK: bseti.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bseti_b_test
+;
+@llvm_mips_bseti_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bseti_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bseti_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bseti_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_bseti_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_bseti_h_test:
+; CHECK: ld.h
+; CHECK: bseti.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bseti_h_test
+;
+@llvm_mips_bseti_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bseti_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bseti_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bseti_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_bseti_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_bseti_w_test:
+; CHECK: ld.w
+; CHECK: bseti.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bseti_w_test
+;
+@llvm_mips_bseti_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bseti_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bseti_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bseti_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_bseti_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bseti.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_bseti_d_test:
+; CHECK: ld.d
+; CHECK: bseti.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bseti_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5-c.ll b/test/CodeGen/Mips/msa/i5-c.ll
new file mode 100644
index 0000000..bf1578f
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-c.ll
@@ -0,0 +1,386 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers the ones beginning with 'c'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
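+; The _s comparisons sign-extend the 5-bit immediate while the _u comparisons
+; zero-extend it; 14 is valid in both encodings.
+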
+@llvm_mips_ceqi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ceqi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ceqi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ceqi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_ceqi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ceqi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_ceqi_b_test:
+; CHECK: ld.b
+; CHECK: ceqi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ceqi_b_test
+;
+@llvm_mips_ceqi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ceqi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ceqi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ceqi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_ceqi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ceqi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_ceqi_h_test:
+; CHECK: ld.h
+; CHECK: ceqi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ceqi_h_test
+;
+@llvm_mips_ceqi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ceqi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ceqi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ceqi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ceqi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ceqi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_ceqi_w_test:
+; CHECK: ld.w
+; CHECK: ceqi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ceqi_w_test
+;
+@llvm_mips_ceqi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ceqi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ceqi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ceqi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ceqi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ceqi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_ceqi_d_test:
+; CHECK: ld.d
+; CHECK: ceqi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ceqi_d_test
+;
+@llvm_mips_clei_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clei_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clei_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clei_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clei.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_clei_s_b_test:
+; CHECK: ld.b
+; CHECK: clei_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clei_s_b_test
+;
+@llvm_mips_clei_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clei_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clei_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clei_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_clei_s_h_test:
+; CHECK: ld.h
+; CHECK: clei_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clei_s_h_test
+;
+@llvm_mips_clei_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clei_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clei_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clei_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_clei_s_w_test:
+; CHECK: ld.w
+; CHECK: clei_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clei_s_w_test
+;
+@llvm_mips_clei_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clei_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clei_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clei_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_clei_s_d_test:
+; CHECK: ld.d
+; CHECK: clei_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clei_s_d_test
+;
+@llvm_mips_clei_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clei_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clei_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clei_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_clei_u_b_test:
+; CHECK: ld.b
+; CHECK: clei_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clei_u_b_test
+;
+@llvm_mips_clei_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clei_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clei_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clei_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_clei_u_h_test:
+; CHECK: ld.h
+; CHECK: clei_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clei_u_h_test
+;
+@llvm_mips_clei_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clei_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clei_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clei_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_clei_u_w_test:
+; CHECK: ld.w
+; CHECK: clei_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clei_u_w_test
+;
+@llvm_mips_clei_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clei_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clei_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clei_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_clei_u_d_test:
+; CHECK: ld.d
+; CHECK: clei_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clei_u_d_test
+;
+@llvm_mips_clti_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clti_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clti_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clti_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_clti_s_b_test:
+; CHECK: ld.b
+; CHECK: clti_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clti_s_b_test
+;
+@llvm_mips_clti_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clti_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clti_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clti_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_clti_s_h_test:
+; CHECK: ld.h
+; CHECK: clti_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clti_s_h_test
+;
+@llvm_mips_clti_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clti_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clti_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clti_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_clti_s_w_test:
+; CHECK: ld.w
+; CHECK: clti_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clti_s_w_test
+;
+@llvm_mips_clti_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clti_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clti_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clti_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_clti_s_d_test:
+; CHECK: ld.d
+; CHECK: clti_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clti_s_d_test
+;
+@llvm_mips_clti_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clti_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clti_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clti_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_clti_u_b_test:
+; CHECK: ld.b
+; CHECK: clti_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clti_u_b_test
+;
+@llvm_mips_clti_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clti_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clti_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clti_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_clti_u_h_test:
+; CHECK: ld.h
+; CHECK: clti_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clti_u_h_test
+;
+@llvm_mips_clti_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clti_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clti_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clti_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_clti_u_w_test:
+; CHECK: ld.w
+; CHECK: clti_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clti_u_w_test
+;
+@llvm_mips_clti_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clti_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clti_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clti_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clti.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_clti_u_d_test:
+; CHECK: ld.d
+; CHECK: clti_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clti_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5-m.ll b/test/CodeGen/Mips/msa/i5-m.ll
new file mode 100644
index 0000000..2766349
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-m.ll
@@ -0,0 +1,310 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers the ones beginning with 'm'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_maxi_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maxi_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maxi_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_maxi_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_b_test:
+; CHECK: ld.b
+; CHECK: maxi_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maxi_s_b_test
+;
+@llvm_mips_maxi_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maxi_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maxi_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_maxi_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_h_test:
+; CHECK: ld.h
+; CHECK: maxi_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maxi_s_h_test
+;
+@llvm_mips_maxi_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maxi_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maxi_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_maxi_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_w_test:
+; CHECK: ld.w
+; CHECK: maxi_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maxi_s_w_test
+;
+@llvm_mips_maxi_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maxi_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maxi_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_maxi_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_d_test:
+; CHECK: ld.d
+; CHECK: maxi_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maxi_s_d_test
+;
+@llvm_mips_maxi_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maxi_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maxi_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_maxi_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_b_test:
+; CHECK: ld.b
+; CHECK: maxi_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maxi_u_b_test
+;
+@llvm_mips_maxi_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maxi_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maxi_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_maxi_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_h_test:
+; CHECK: ld.h
+; CHECK: maxi_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maxi_u_h_test
+;
+@llvm_mips_maxi_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maxi_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maxi_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_maxi_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_w_test:
+; CHECK: ld.w
+; CHECK: maxi_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maxi_u_w_test
+;
+@llvm_mips_maxi_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maxi_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maxi_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_maxi_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_d_test:
+; CHECK: ld.d
+; CHECK: maxi_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maxi_u_d_test
+;
+@llvm_mips_mini_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mini_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mini_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mini_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_b_test:
+; CHECK: ld.b
+; CHECK: mini_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mini_s_b_test
+;
+@llvm_mips_mini_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mini_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mini_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mini_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_h_test:
+; CHECK: ld.h
+; CHECK: mini_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mini_s_h_test
+;
+@llvm_mips_mini_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mini_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mini_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mini_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_w_test:
+; CHECK: ld.w
+; CHECK: mini_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mini_s_w_test
+;
+@llvm_mips_mini_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mini_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mini_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mini_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_d_test:
+; CHECK: ld.d
+; CHECK: mini_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mini_s_d_test
+;
+@llvm_mips_mini_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mini_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mini_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mini_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_b_test:
+; CHECK: ld.b
+; CHECK: mini_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mini_u_b_test
+;
+@llvm_mips_mini_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mini_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mini_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mini_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_h_test:
+; CHECK: ld.h
+; CHECK: mini_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mini_u_h_test
+;
+@llvm_mips_mini_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mini_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mini_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mini_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_w_test:
+; CHECK: ld.w
+; CHECK: mini_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mini_u_w_test
+;
+@llvm_mips_mini_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mini_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mini_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mini_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mini.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_d_test:
+; CHECK: ld.d
+; CHECK: mini_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mini_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5-s.ll b/test/CodeGen/Mips/msa/i5-s.ll
new file mode 100644
index 0000000..184172f
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-s.ll
@@ -0,0 +1,82 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers the ones beginning with 's'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_subvi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subvi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subvi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subvi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_b_test:
+; CHECK: ld.b
+; CHECK: subvi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_subvi_b_test
+;
+@llvm_mips_subvi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subvi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subvi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subvi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_h_test:
+; CHECK: ld.h
+; CHECK: subvi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_subvi_h_test
+;
+@llvm_mips_subvi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subvi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subvi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subvi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_w_test:
+; CHECK: ld.w
+; CHECK: subvi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_subvi_w_test
+;
+@llvm_mips_subvi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subvi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subvi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subvi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subvi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_d_test:
+; CHECK: ld.d
+; CHECK: subvi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_subvi_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5_ld_st.ll b/test/CodeGen/Mips/msa/i5_ld_st.ll
new file mode 100644
index 0000000..7cc55f2
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5_ld_st.ll
@@ -0,0 +1,150 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format and
+; are loads or stores.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
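+; The offset operand of the ld/st intrinsics is in bytes and must fit the
+; scaled 10-bit immediate of ld.df/st.df, so 16 folds into the memory operand.
+; A rough C equivalent (assuming clang's <msa.h> builtins) is:
+;   v16i8 test(void *p) { return __builtin_msa_ld_b(p, 16); }
+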
+@llvm_mips_ld_b_ARG = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ld_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ld_b_test() nounwind {
+entry:
+ %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
+ %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 16)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ld.b(i8*, i32) nounwind
+
+; CHECK: llvm_mips_ld_b_test:
+; CHECK: ld.b [[R1:\$w[0-9]+]], 16(
+; CHECK: st.b
+; CHECK: .size llvm_mips_ld_b_test
+;
+@llvm_mips_ld_h_ARG = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ld_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ld_h_test() nounwind {
+entry:
+ %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
+ %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 16)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ld.h(i8*, i32) nounwind
+
+; CHECK: llvm_mips_ld_h_test:
+; CHECK: ld.h [[R1:\$w[0-9]+]], 16(
+; CHECK: st.h
+; CHECK: .size llvm_mips_ld_h_test
+;
+@llvm_mips_ld_w_ARG = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ld_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ld_w_test() nounwind {
+entry:
+ %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
+ %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 16)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ld.w(i8*, i32) nounwind
+
+; CHECK: llvm_mips_ld_w_test:
+; CHECK: ld.w [[R1:\$w[0-9]+]], 16(
+; CHECK: st.w
+; CHECK: .size llvm_mips_ld_w_test
+;
+@llvm_mips_ld_d_ARG = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ld_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ld_d_test() nounwind {
+entry:
+ %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
+ %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 16)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind
+
+; CHECK: llvm_mips_ld_d_test:
+; CHECK: ld.d [[R1:\$w[0-9]+]], 16(
+; CHECK: st.d
+; CHECK: .size llvm_mips_ld_d_test
+;
+@llvm_mips_st_b_ARG = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_st_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_st_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_st_b_ARG
+ %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
+ tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 16)
+ ret void
+}
+
+declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind
+
+; CHECK: llvm_mips_st_b_test:
+; CHECK: ld.b
+; CHECK: st.b [[R1:\$w[0-9]+]], 16(
+; CHECK: .size llvm_mips_st_b_test
+;
+@llvm_mips_st_h_ARG = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_st_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_st_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_st_h_ARG
+ %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
+ tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 16)
+ ret void
+}
+
+declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind
+
+; CHECK: llvm_mips_st_h_test:
+; CHECK: ld.h
+; CHECK: st.h [[R1:\$w[0-9]+]], 16(
+; CHECK: .size llvm_mips_st_h_test
+;
+@llvm_mips_st_w_ARG = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_st_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_st_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_st_w_ARG
+ %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
+ tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16)
+ ret void
+}
+
+declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind
+
+; CHECK: llvm_mips_st_w_test:
+; CHECK: ld.w
+; CHECK: st.w [[R1:\$w[0-9]+]], 16(
+; CHECK: .size llvm_mips_st_w_test
+;
+@llvm_mips_st_d_ARG = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_st_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_st_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_st_d_ARG
+ %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
+ tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 16)
+ ret void
+}
+
+declare void @llvm.mips.st.d(<2 x i64>, i8*, i32) nounwind
+
+; CHECK: llvm_mips_st_d_test:
+; CHECK: ld.d
+; CHECK: st.d [[R1:\$w[0-9]+]], 16(
+; CHECK: .size llvm_mips_st_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i8.ll b/test/CodeGen/Mips/msa/i8.ll
new file mode 100644
index 0000000..d2931a7
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i8.ll
@@ -0,0 +1,211 @@
+; Test the MSA intrinsics that are encoded with the I8 instruction format.
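+; (These intrinsics each take an 8-bit immediate operand, hence the I8
+; format; the tests below pass the immediate 25 throughout.)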
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_andi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_andi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_andi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_andi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.andi.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_andi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_andi_b_test:
+; CHECK: ld.b
+; CHECK: andi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_andi_b_test
+
+@llvm_mips_bmnzi_b_ARG1 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+@llvm_mips_bmnzi_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmnzi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bmnzi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bmnzi_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: bmnzi.b [[R3]], [[R4]], 25
+; CHECK-DAG: st.b [[R3]], 0(
+; CHECK: .size llvm_mips_bmnzi_b_test
+
+@llvm_mips_bmzi_b_ARG1 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+@llvm_mips_bmzi_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmzi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bmzi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bmzi_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bmzi_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bmzi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bmzi_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmzi_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmzi_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; bmzi.b is the same as bmnzi.b with ws and wd_in swapped, so the compiler
+; emits bmnzi.b with its operands reversed (note the swapped registers below)
+; CHECK-DAG: bmnzi.b [[R4]], [[R3]], 25
+; CHECK-DAG: st.b [[R4]], 0(
+; CHECK: .size llvm_mips_bmzi_b_test
+
+@llvm_mips_bseli_b_ARG1 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+@llvm_mips_bseli_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bseli_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bseli_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bseli_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bseli_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %0, <16 x i8> %1, i32 25)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bseli_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bseli_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bseli_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bseli_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: bseli.b [[R3]], [[R4]], 25
+; CHECK-DAG: st.b [[R3]], 0(
+; CHECK: .size llvm_mips_bseli_b_test
+
+@llvm_mips_nori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nori_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nori_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_nori_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.nori.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_nori_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_nori_b_test:
+; CHECK: ld.b
+; CHECK: nori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_nori_b_test
+;
+@llvm_mips_ori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ori_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ori_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ori_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.ori.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_ori_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_ori_b_test:
+; CHECK: ld.b
+; CHECK: ori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ori_b_test
+;
+@llvm_mips_shf_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_shf_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_shf_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_shf_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.shf.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_shf_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.shf.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_shf_b_test:
+; CHECK: ld.b
+; CHECK: shf.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_shf_b_test
+;
+@llvm_mips_shf_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_shf_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_shf_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_shf_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.shf.h(<8 x i16> %0, i32 25)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_shf_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.shf.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_shf_h_test:
+; CHECK: ld.h
+; CHECK: shf.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_shf_h_test
+;
+@llvm_mips_shf_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_shf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_shf_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_shf_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.shf.w(<4 x i32> %0, i32 25)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_shf_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.shf.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_shf_w_test:
+; CHECK: ld.w
+; CHECK: shf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_shf_w_test
+;
+@llvm_mips_xori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_xori_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_xori_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_xori_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.xori.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_xori_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.xori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_xori_b_test:
+; CHECK: ld.b
+; CHECK: xori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_xori_b_test
+;
diff --git a/test/CodeGen/Mips/msa/inline-asm.ll b/test/CodeGen/Mips/msa/inline-asm.ll
new file mode 100644
index 0000000..4a34273
--- /dev/null
+++ b/test/CodeGen/Mips/msa/inline-asm.ll
@@ -0,0 +1,34 @@
+; A basic inline assembly test
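+; (The "=f" register constraints together with the ${N:w} operand modifiers
+; are what request MSA registers and print them in their $wN form, which is
+; what the CHECK lines below match.)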
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@v4i32_r = global <4 x i32> zeroinitializer, align 16
+
+define void @test1() nounwind {
+entry:
+ ; CHECK-LABEL: test1:
+ %0 = call <4 x i32> asm "ldi.w ${0:w}, 1", "=f"()
+ ; CHECK: ldi.w $w{{[1-3]?[0-9]}}, 1
+ store <4 x i32> %0, <4 x i32>* @v4i32_r
+ ret void
+}
+
+define void @test2() nounwind {
+entry:
+ ; CHECK-LABEL: test2:
+ %0 = load <4 x i32>* @v4i32_r
+ %1 = call <4 x i32> asm "addvi.w ${0:w}, ${1:w}, 1", "=f,f"(<4 x i32> %0)
+ ; CHECK: addvi.w $w{{[1-3]?[0-9]}}, $w{{[1-3]?[0-9]}}, 1
+ store <4 x i32> %1, <4 x i32>* @v4i32_r
+ ret void
+}
+
+define void @test3() nounwind {
+entry:
+ ; CHECK-LABEL: test3:
+ %0 = load <4 x i32>* @v4i32_r
+ %1 = call <4 x i32> asm sideeffect "addvi.w ${0:w}, ${1:w}, 1", "=f,f,~{$w0}"(<4 x i32> %0)
+ ; CHECK: addvi.w $w{{([1-9]|[1-3][0-9])}}, $w{{([1-9]|[1-3][0-9])}}, 1
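+ ; ($w0 is deliberately excluded from the regex above: the asm statement
+ ; clobbers it via ~{$w0}.)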
+ store <4 x i32> %1, <4 x i32>* @v4i32_r
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll b/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
new file mode 100644
index 0000000..4beaaa9
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
@@ -0,0 +1,134 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; "Unexpected illegal type!" assertion.
+; It should at least successfully build.
+
+define void @autogen_SD1704963983(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca <4 x double>
+ %A3 = alloca <8 x i64>
+ %A2 = alloca <1 x double>
+ %A1 = alloca double
+ %A = alloca i32
+ %L = load i8* %0
+ store i8 77, i8* %0
+ %E = extractelement <8 x i64> zeroinitializer, i32 2
+ %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15, i32 1, i32 3>
+ %I = insertelement <8 x i64> zeroinitializer, i64 %E, i32 7
+ %Sl = select i1 false, i8* %0, i8* %0
+ %Cmp = icmp eq i32 434069, 272505
+ br label %CF
+
+CF: ; preds = %CF, %CF78, %BB
+ %L5 = load i8* %Sl
+ store i8 %L, i8* %Sl
+ %E6 = extractelement <8 x i32> zeroinitializer, i32 2
+ %Shuff7 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 undef>
+ %I8 = insertelement <8 x i64> zeroinitializer, i64 %4, i32 7
+ %B = shl <1 x i16> zeroinitializer, zeroinitializer
+ %FC = sitofp <8 x i64> zeroinitializer to <8 x float>
+ %Sl9 = select i1 %Cmp, i8 77, i8 77
+ %Cmp10 = icmp uge <8 x i64> %Shuff, zeroinitializer
+ %L11 = load i8* %0
+ store i8 %Sl9, i8* %0
+ %E12 = extractelement <1 x i16> zeroinitializer, i32 0
+ %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 undef, i32 3, i32 5, i32 7>
+ %I14 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3
+ %B15 = udiv <1 x i16> %B, zeroinitializer
+ %Tr = trunc <8 x i64> %Shuff to <8 x i32>
+ %Sl16 = select i1 %Cmp, i8 77, i8 %5
+ %Cmp17 = icmp ult <8 x i1> %Cmp10, %Cmp10
+ %L18 = load i8* %Sl
+ store i8 -1, i8* %Sl
+ %E19 = extractelement <8 x i32> zeroinitializer, i32 3
+ %Shuff20 = shufflevector <8 x float> %FC, <8 x float> %FC, <8 x i32> <i32 6, i32 8, i32 undef, i32 12, i32 14, i32 0, i32 2, i32 undef>
+ %I21 = insertelement <8 x i64> %Shuff13, i64 %E, i32 0
+ %B22 = urem <8 x i64> %Shuff7, %I21
+ %FC23 = sitofp i32 50347 to float
+ %Sl24 = select i1 %Cmp, double 0.000000e+00, double 0.000000e+00
+ %Cmp25 = icmp ugt i32 465489, 47533
+ br i1 %Cmp25, label %CF, label %CF78
+
+CF78: ; preds = %CF
+ %L26 = load i8* %Sl
+ store i32 50347, i32* %A
+ %E27 = extractelement <8 x i1> %Cmp10, i32 2
+ br i1 %E27, label %CF, label %CF77
+
+CF77: ; preds = %CF77, %CF81, %CF78
+ %Shuff28 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 undef>
+ %I29 = insertelement <1 x i16> zeroinitializer, i16 -1, i32 0
+ %B30 = urem <8 x i32> %Tr, zeroinitializer
+ %Tr31 = trunc i32 0 to i16
+ %Sl32 = select i1 %Cmp, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
+ %L33 = load i8* %Sl
+ store i8 %L26, i8* %Sl
+ %E34 = extractelement <4 x i32> zeroinitializer, i32 0
+ %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> %B, <1 x i32> undef
+ %I36 = insertelement <8 x i64> %Shuff28, i64 %E, i32 7
+ %B37 = srem <1 x i16> %I29, zeroinitializer
+ %FC38 = sitofp <8 x i32> %B30 to <8 x double>
+ %Sl39 = select i1 %Cmp, double 0.000000e+00, double %Sl24
+ %L40 = load i8* %Sl
+ store i8 %Sl16, i8* %Sl
+ %E41 = extractelement <1 x i16> zeroinitializer, i32 0
+ %Shuff42 = shufflevector <8 x i1> %Cmp17, <8 x i1> %Cmp10, <8 x i32> <i32 14, i32 undef, i32 2, i32 4, i32 undef, i32 8, i32 10, i32 12>
+ %I43 = insertelement <4 x i32> zeroinitializer, i32 272505, i32 0
+ %B44 = urem <8 x i32> %B30, %Tr
+ %PC = bitcast i8* %0 to i64*
+ %Sl45 = select i1 %Cmp, <8 x i1> %Cmp10, <8 x i1> %Shuff42
+ %Cmp46 = fcmp ugt float 0xB856238A00000000, 0x47DA795E40000000
+ br i1 %Cmp46, label %CF77, label %CF80
+
+CF80: ; preds = %CF80, %CF77
+ %L47 = load i64* %PC
+ store i8 77, i8* %Sl
+ %E48 = extractelement <8 x i64> zeroinitializer, i32 2
+ %Shuff49 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff7, <8 x i32> <i32 5, i32 7, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 3>
+ %I50 = insertelement <8 x i64> zeroinitializer, i64 %L47, i32 7
+ %B51 = fdiv float 0x46CC2D8000000000, %FC23
+ %PC52 = bitcast <8 x i64>* %A3 to i64*
+ %Sl53 = select i1 %Cmp, <8 x i64> %Shuff, <8 x i64> %Shuff
+ %Cmp54 = fcmp ole float 0x47DA795E40000000, 0xB856238A00000000
+ br i1 %Cmp54, label %CF80, label %CF81
+
+CF81: ; preds = %CF80
+ %L55 = load i8* %Sl
+ store i8 %Sl16, i8* %Sl
+ %E56 = extractelement <1 x i16> %B, i32 0
+ %Shuff57 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> <i32 1>
+ %I58 = insertelement <8 x i64> zeroinitializer, i64 %L47, i32 7
+ %B59 = srem i32 %E19, %E19
+ %Sl60 = select i1 %Cmp, i8 77, i8 77
+ %Cmp61 = icmp ult <1 x i16> zeroinitializer, %B
+ %L62 = load i8* %Sl
+ store i64 %L47, i64* %PC52
+ %E63 = extractelement <4 x i32> %I43, i32 2
+ %Shuff64 = shufflevector <4 x i1> zeroinitializer, <4 x i1> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
+ %I65 = insertelement <8 x i64> %B22, i64 %L47, i32 7
+ %B66 = add <8 x i64> %I50, %I65
+ %FC67 = uitofp i16 %E12 to float
+ %Sl68 = select i1 %Cmp, <8 x i32> %B30, <8 x i32> zeroinitializer
+ %Cmp69 = fcmp ord double 0.000000e+00, 0.000000e+00
+ br i1 %Cmp69, label %CF77, label %CF79
+
+CF79: ; preds = %CF81
+ %L70 = load i32* %A
+ store i64 %4, i64* %PC
+ %E71 = extractelement <4 x i32> zeroinitializer, i32 0
+ %Shuff72 = shufflevector <8 x i32> zeroinitializer, <8 x i32> %B44, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 undef, i32 7, i32 9>
+ %I73 = insertelement <8 x i16> zeroinitializer, i16 %E12, i32 5
+ %B74 = fsub double 0.000000e+00, 0.000000e+00
+ %Sl75 = select i1 %Cmp46, i32 %E6, i32 %E19
+ %Cmp76 = icmp ugt <4 x i32> %I43, zeroinitializer
+ store i8 %L, i8* %Sl
+ store i64 %L47, i64* %PC
+ store i64 %L47, i64* %PC
+ store i8 %L5, i8* %Sl
+ store i8 %L5, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll b/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
new file mode 100644
index 0000000..f9cab03
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
@@ -0,0 +1,138 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; `Opc && "Cannot copy registers"' assertion.
+; It should at least successfully build.
+
+define void @autogen_SD1935737938(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca i64
+ %A3 = alloca <4 x i32>
+ %A2 = alloca i64
+ %A1 = alloca i32
+ %A = alloca <2 x i64>
+ %L = load i8* %0
+ store i8 -1, i8* %0
+ %E = extractelement <2 x i32> zeroinitializer, i32 0
+ %Shuff = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I = insertelement <1 x i64> <i64 -1>, i64 286689, i32 0
+ %B = lshr i8 %L, -69
+ %ZE = fpext float 0xBF2AA5FE80000000 to double
+ %Sl = select i1 true, <1 x i64> <i64 -1>, <1 x i64> <i64 -1>
+ %L5 = load i8* %0
+ store i8 -69, i8* %0
+ %E6 = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
+ %Shuff7 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I8 = insertelement <2 x i32> zeroinitializer, i32 135673, i32 1
+ %B9 = udiv i8 %B, %B
+ %FC = uitofp i32 %3 to double
+ %Sl10 = select i1 true, <1 x i1> zeroinitializer, <1 x i1> zeroinitializer
+ %Cmp = icmp ne <1 x i64> %I, <i64 -1>
+ %L11 = load i8* %0
+ store i8 %L11, i8* %0
+ %E12 = extractelement <1 x i64> <i64 -1>, i32 0
+ %Shuff13 = shufflevector <1 x i64> %Sl, <1 x i64> <i64 -1>, <1 x i32> <i32 1>
+ %I14 = insertelement <1 x i64> %I, i64 303290, i32 0
+ %B15 = frem float 0.000000e+00, 0.000000e+00
+ %Sl16 = select i1 true, <1 x i1> %Cmp, <1 x i1> zeroinitializer
+ %Cmp17 = fcmp one float 0xBD946F9840000000, %B15
+ br label %CF74
+
+CF74: ; preds = %CF74, %CF80, %CF76, %BB
+ %L18 = load i8* %0
+ store i8 -69, i8* %0
+ %E19 = extractelement <1 x i64> %Sl, i32 0
+ %Shuff20 = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i32> <i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10>
+ %I21 = insertelement <2 x i32> %Shuff, i32 135673, i32 0
+ %B22 = urem i32 135673, %3
+ %FC23 = sitofp i8 %L to float
+ %Sl24 = select i1 true, i8 %B, i8 %L18
+ %L25 = load i8* %0
+ store i8 %L, i8* %0
+ %E26 = extractelement <2 x i32> %Shuff, i32 1
+ %Shuff27 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 2, i32 0>
+ %I28 = insertelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i64 %E12, i32 8
+ %B29 = frem double %ZE, 0x235104F0E94F406E
+ %Tr = trunc i64 286689 to i8
+ %Sl30 = select i1 true, float 0x45B13EA500000000, float %B15
+ %Cmp31 = icmp eq i32 %B22, %B22
+ br i1 %Cmp31, label %CF74, label %CF80
+
+CF80: ; preds = %CF74
+ %L32 = load i8* %0
+ store i8 -1, i8* %0
+ %E33 = extractelement <2 x i32> zeroinitializer, i32 1
+ %Shuff34 = shufflevector <1 x i64> %Shuff13, <1 x i64> <i64 -1>, <1 x i32> zeroinitializer
+ %I35 = insertelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i8 -1, i32 0
+ %FC36 = sitofp <1 x i1> %Cmp to <1 x float>
+ %Sl37 = select i1 true, <8 x i8> %Shuff20, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %Cmp38 = icmp sgt <2 x i32> %I21, %Shuff27
+ %L39 = load i8* %0
+ store i8 %Sl24, i8* %0
+ %E40 = extractelement <8 x i64> zeroinitializer, i32 1
+ %Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Cmp38, <2 x i32> <i32 0, i32 2>
+ %I42 = insertelement <4 x i32> zeroinitializer, i32 414573, i32 2
+ %B43 = srem i8 %L5, %L39
+ %Sl44 = select i1 %Cmp17, i8 %L, i8 %L
+ %Cmp45 = fcmp une float 0x3AFCE1A0C0000000, 0.000000e+00
+ br i1 %Cmp45, label %CF74, label %CF76
+
+CF76: ; preds = %CF80
+ %L46 = load i8* %0
+ store i8 %L39, i8* %0
+ %E47 = extractelement <2 x i32> %Shuff27, i32 0
+ %Shuff48 = shufflevector <1 x i1> %Sl10, <1 x i1> %Sl10, <1 x i32> <i32 1>
+ %I49 = insertelement <1 x i64> <i64 -1>, i64 %E12, i32 0
+ %FC50 = fptosi double 0x235104F0E94F406E to i32
+ %Sl51 = select i1 %Cmp17, <16 x i64> %I28, <16 x i64> %I28
+ %Cmp52 = icmp ne i8 %Tr, %Sl24
+ br i1 %Cmp52, label %CF74, label %CF75
+
+CF75: ; preds = %CF75, %CF76
+ %L53 = load i8* %0
+ store i8 %L18, i8* %0
+ %E54 = extractelement <8 x i8> %Shuff20, i32 5
+ %Shuff55 = shufflevector <2 x i32> %Shuff, <2 x i32> zeroinitializer, <2 x i32> <i32 0, i32 2>
+ %I56 = insertelement <4 x i32> %I42, i32 %B22, i32 2
+ %B57 = sub i64 %E40, %E6
+ %Sl58 = select i1 true, i64 303290, i64 %E40
+ %Cmp59 = icmp slt i64 %E40, %E6
+ br i1 %Cmp59, label %CF75, label %CF78
+
+CF78: ; preds = %CF75
+ %L60 = load i8* %0
+ store i8 -69, i8* %0
+ %E61 = extractelement <2 x i32> zeroinitializer, i32 0
+ %Shuff62 = shufflevector <2 x i32> %Shuff7, <2 x i32> %I21, <2 x i32> <i32 1, i32 3>
+ %I63 = insertelement <1 x i1> %Sl16, i1 %Cmp45, i32 0
+ %B64 = and i8 %Sl44, -69
+ %ZE65 = zext <1 x i1> %Shuff48 to <1 x i64>
+ %Sl66 = select i1 true, <1 x i64> %I, <1 x i64> %I49
+ %Cmp67 = icmp ugt i64 286689, %E40
+ br label %CF
+
+CF: ; preds = %CF, %CF78
+ %L68 = load i8* %0
+ store i64 %B57, i64* %2
+ %E69 = extractelement <2 x i1> %Shuff41, i32 1
+ br i1 %E69, label %CF, label %CF77
+
+CF77: ; preds = %CF77, %CF
+ %Shuff70 = shufflevector <1 x i64> %Shuff34, <1 x i64> <i64 -1>, <1 x i32> zeroinitializer
+ %I71 = insertelement <2 x i32> %Shuff, i32 %E26, i32 0
+ %Se = sext i8 %L60 to i32
+ %Sl72 = select i1 %Cmp45, <2 x i32> %Shuff62, <2 x i32> %I71
+ %Cmp73 = fcmp ugt double 0x235104F0E94F406E, 0x235104F0E94F406E
+ br i1 %Cmp73, label %CF77, label %CF79
+
+CF79: ; preds = %CF77
+ store i8 %L18, i8* %0
+ store i8 %E54, i8* %0
+ store i8 %L39, i8* %0
+ store i8 %L39, i8* %0
+ store i8 %B, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll
new file mode 100644
index 0000000..3811314
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a "Cannot select ..." error.
+; This was because undefs are ignored when checking whether a vector constant
+; is a splat, but they are legalized to zero if left in the DAG, which changes
+; the constant into a non-splat.
+;
+; It should at least successfully build.
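+;
+; As an illustrative sketch (values not taken from the reproducer below): a
+; constant like <4 x i8> <i8 1, i8 1, i8 undef, i8 1> is a splat of 1 while
+; the undef lane acts as a wildcard, but legalizing that lane to zero yields
+; <i8 1, i8 1, i8 0, i8 1>, which no longer matches any splat pattern.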
+
+define void @autogen_SD2090927243() {
+BB:
+ br label %CF77
+
+CF77: ; preds = %CF77, %CF80
+ %Shuff27 = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>,
+ <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>,
+ <16 x i32> <i32 27, i32 29, i32 31, i32 1, i32 3, i32 5, i32 undef, i32 9, i32 11, i32 13, i32 undef, i32 17, i32 19, i32 21, i32 23, i32 undef>
+ %ZE30 = zext <16 x i8> %Shuff27 to <16 x i32>
+ %Cmp32 = fcmp ueq float undef, 0x3CDA6E5E40000000
+ br i1 %Cmp32, label %CF77, label %CF
+
+CF: ; preds = %CF, %CF81
+ %E48 = extractelement <16 x i32> %ZE30, i32 14
+ br i1 undef, label %CF, label %CF78
+
+CF78: ; preds = %CF
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll
new file mode 100644
index 0000000..564ad74
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a "Cannot select ..." error.
+; This happened because the legalizer treated the undefs in the <4 x float>
+; constant as equivalent to the defined elements when checking whether it was
+; a constant splat, but then proceeded to legalize the undefs to zero, leaving
+; it as a non-splat that cannot be selected. It should have eliminated the
+; undefs by rewriting the splat constant.
+
+; It should at least successfully build.
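+;
+; Concretely, the <4 x i32> <i32 -1, i32 -1, i32 undef, i32 undef> constant
+; below is a splat of -1 so long as the undef lanes are treated as wildcards.
+; Rewriting those lanes to -1 preserves the splat, whereas legalizing them to
+; zero produces <i32 -1, i32 -1, i32 0, i32 0>, which is not a splat.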
+
+define void @autogen_SD2501752154() {
+BB:
+ %BC = bitcast <4 x i32> <i32 -1, i32 -1, i32 undef, i32 undef> to <4 x float>
+ br label %CF74
+
+CF74: ; preds = %CF74, %CF
+ %E54 = extractelement <1 x i1> undef, i32 0
+ br i1 %E54, label %CF74, label %CF79
+
+CF79: ; preds = %CF75
+ %I63 = insertelement <4 x float> %BC, float undef, i32 0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll b/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
new file mode 100644
index 0000000..e14f405
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
@@ -0,0 +1,141 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA after dereferencing a null this pointer.
+; It should at least successfully build.
+
+define void @autogen_SD2704903805(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca i32
+ %A3 = alloca i32
+ %A2 = alloca i8
+ %A1 = alloca i32
+ %A = alloca i8
+ %L = load i8* %0
+ store i8 %5, i8* %0
+ %E = extractelement <2 x i16> zeroinitializer, i32 0
+ %Shuff = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> undef
+ %I = insertelement <1 x i8> <i8 -1>, i8 85, i32 0
+ %B = lshr <2 x i16> zeroinitializer, zeroinitializer
+ %FC = sitofp <4 x i16> zeroinitializer to <4 x float>
+ %Sl = select i1 true, float 0.000000e+00, float 0x401E76A240000000
+ %Cmp = icmp ule i16 -25210, %E
+ br label %CF83
+
+CF83: ; preds = %BB
+ %L5 = load i8* %0
+ store i8 85, i8* %0
+ %E6 = extractelement <1 x i8> <i8 -1>, i32 0
+ %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I8 = insertelement <4 x i16> zeroinitializer, i16 %E, i32 3
+ %B9 = ashr <2 x i16> %Shuff7, zeroinitializer
+ %FC10 = sitofp i32 -1 to float
+ %Sl11 = select i1 %Cmp, i32 -1, i32 -1
+ %Cmp12 = icmp sgt i32 -1, -1
+ br label %CF
+
+CF: ; preds = %CF, %CF81, %CF83
+ %L13 = load i8* %0
+ store i8 0, i8* %0
+ %E14 = extractelement <2 x i64> zeroinitializer, i32 0
+ %Shuff15 = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 3, i32 5, i32 7, i32 undef>
+ %I16 = insertelement <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i64 81222, i32 1
+ %B17 = lshr <2 x i16> zeroinitializer, %B
+ %Tr = trunc i32 272597 to i1
+ br i1 %Tr, label %CF, label %CF80
+
+CF80: ; preds = %CF80, %CF
+ %Sl18 = select i1 %Cmp, <2 x i64> zeroinitializer, <2 x i64> zeroinitializer
+ %Cmp19 = icmp ne i1 %Cmp12, %Cmp
+ br i1 %Cmp19, label %CF80, label %CF81
+
+CF81: ; preds = %CF80
+ %L20 = load i8* %0
+ store i8 85, i8* %0
+ %E21 = extractelement <1 x i8> <i8 -1>, i32 0
+ %Shuff22 = shufflevector <1 x i8> <i8 -1>, <1 x i8> %Shuff, <1 x i32> zeroinitializer
+ %I23 = insertelement <1 x i8> <i8 -1>, i8 %L5, i32 0
+ %FC24 = fptoui <4 x float> %FC to <4 x i16>
+ %Sl25 = select i1 %Cmp, <2 x i32> zeroinitializer, <2 x i32> <i32 -1, i32 -1>
+ %Cmp26 = icmp ult <4 x i64> %I16, %Shuff15
+ %L27 = load i8* %0
+ store i8 %L, i8* %0
+ %E28 = extractelement <1 x i8> <i8 -1>, i32 0
+ %Shuff29 = shufflevector <8 x i16> zeroinitializer, <8 x i16> zeroinitializer, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5, i32 undef, i32 9>
+ %I30 = insertelement <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i64 %E14, i32 1
+ %B31 = mul i8 %E28, 85
+ %PC = bitcast i32* %A3 to i32*
+ %Sl32 = select i1 %Cmp12, float %FC10, float 0x4712BFE680000000
+ %L33 = load i32* %PC
+ store i32 %L33, i32* %PC
+ %E34 = extractelement <2 x i16> zeroinitializer, i32 1
+ %Shuff35 = shufflevector <1 x i8> %Shuff, <1 x i8> <i8 -1>, <1 x i32> zeroinitializer
+ %I36 = insertelement <1 x i8> <i8 -1>, i8 %L13, i32 0
+ %B37 = xor i8 %L27, %L
+ %Sl38 = select i1 %Cmp, i16 %E34, i16 %E
+ %Cmp39 = icmp eq i1 %Cmp19, %Cmp
+ br i1 %Cmp39, label %CF, label %CF77
+
+CF77: ; preds = %CF77, %CF81
+ %L40 = load i32* %PC
+ store i32 %3, i32* %PC
+ %E41 = extractelement <2 x i32> zeroinitializer, i32 0
+ %Shuff42 = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I43 = insertelement <1 x i8> <i8 -1>, i8 0, i32 0
+ %B44 = or i16 %E, -25210
+ %Se = sext i32 %3 to i64
+ %Sl45 = select i1 true, <1 x i8> %Shuff, <1 x i8> %I43
+ %Cmp46 = icmp sge <1 x i8> %I36, %Shuff
+ %L47 = load i32* %PC
+ store i32 %L33, i32* %PC
+ %E48 = extractelement <2 x i16> zeroinitializer, i32 0
+ %Shuff49 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
+ %I50 = insertelement <2 x i32> %Sl25, i32 47963, i32 1
+ %B51 = srem <1 x i8> %I, %Shuff22
+ %FC52 = sitofp i8 %5 to double
+ %Sl53 = select i1 %Cmp39, i8 %L27, i8 85
+ %Cmp54 = icmp slt i16 %E34, %E34
+ br i1 %Cmp54, label %CF77, label %CF78
+
+CF78: ; preds = %CF78, %CF77
+ %L55 = load i32* %PC
+ store i32 %L33, i32* %PC
+ %E56 = extractelement <8 x i16> %Shuff29, i32 4
+ %Shuff57 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
+ %I58 = insertelement <1 x i8> %B51, i8 %Sl53, i32 0
+ %ZE = fpext float %FC10 to double
+ %Sl59 = select i1 %Cmp12, <2 x i16> %B9, <2 x i16> zeroinitializer
+ %Cmp60 = fcmp ult double 0.000000e+00, 0.000000e+00
+ br i1 %Cmp60, label %CF78, label %CF79
+
+CF79: ; preds = %CF79, %CF78
+ %L61 = load i32* %PC
+ store i32 %L33, i32* %A3
+ %E62 = extractelement <4 x i64> %Shuff15, i32 1
+ %Shuff63 = shufflevector <8 x i16> %Shuff29, <8 x i16> %Shuff29, <8 x i32> <i32 undef, i32 10, i32 12, i32 undef, i32 undef, i32 undef, i32 4, i32 6>
+ %I64 = insertelement <2 x i64> zeroinitializer, i64 %Se, i32 0
+ %B65 = shl i8 %5, 85
+ %ZE66 = zext <4 x i1> %Cmp26 to <4 x i32>
+ %Sl67 = select i1 %Tr, <1 x i8> %Shuff, <1 x i8> %I23
+ %Cmp68 = fcmp olt float 0x4712BFE680000000, 0x4712BFE680000000
+ br i1 %Cmp68, label %CF79, label %CF82
+
+CF82: ; preds = %CF79
+ %L69 = load i32* %PC
+ store i32 %L33, i32* %PC
+ %E70 = extractelement <8 x i16> zeroinitializer, i32 3
+ %Shuff71 = shufflevector <4 x i64> %Shuff15, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 6, i32 undef, i32 2, i32 4>
+ %I72 = insertelement <1 x i8> <i8 -1>, i8 %L, i32 0
+ %B73 = srem i64 %E62, %Se
+ %ZE74 = zext <4 x i1> %Cmp26 to <4 x i32>
+ %Sl75 = select i1 %Cmp, i32 463279, i32 %L61
+ %Cmp76 = icmp sgt <1 x i8> %Shuff49, %Shuff22
+ store i8 %B31, i8* %0
+ store i8 85, i8* %0
+ store i32 %L33, i32* %PC
+ store i8 %B65, i8* %0
+ store i8 %L5, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll b/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
new file mode 100644
index 0000000..1a03e55
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
@@ -0,0 +1,149 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; "Don't know how to expand this condition!" unreachable.
+; It should at least successfully build.
+
+define void @autogen_SD3861334421(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca <2 x i32>
+ %A3 = alloca <2 x double>
+ %A2 = alloca i64
+ %A1 = alloca i64
+ %A = alloca double
+ %L = load i8* %0
+ store i8 -101, i8* %0
+ %E = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
+ %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1>
+ %I = insertelement <8 x i64> zeroinitializer, i64 %4, i32 5
+ %B = and i64 116376, 57247
+ %FC = uitofp i8 7 to double
+ %Sl = select i1 false, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %L5 = load i8* %0
+ store i8 %L, i8* %0
+ %E6 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 3
+ %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
+ %I8 = insertelement <8 x i8> %Sl, i8 7, i32 4
+ %B9 = or <8 x i64> zeroinitializer, zeroinitializer
+ %Sl10 = select i1 false, i64 116376, i64 380809
+ %Cmp = icmp sgt i32 394647, 17081
+ br label %CF
+
+CF: ; preds = %CF, %BB
+ %L11 = load i8* %0
+ store i8 -87, i8* %0
+ %E12 = extractelement <4 x i64> zeroinitializer, i32 0
+ %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1, i32 3, i32 5>
+ %I14 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 1
+ %B15 = srem i64 %Sl10, 380809
+ %FC16 = sitofp i64 57247 to float
+ %Sl17 = select i1 false, double 0x87A9374869A78EC6, double 0.000000e+00
+ %Cmp18 = icmp uge i8 %L, %5
+ br i1 %Cmp18, label %CF, label %CF80
+
+CF80: ; preds = %CF80, %CF88, %CF
+ %L19 = load i8* %0
+ store i8 -101, i8* %0
+ %E20 = extractelement <4 x i64> zeroinitializer, i32 0
+ %Shuff21 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff7, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
+ %I22 = insertelement <4 x i64> zeroinitializer, i64 127438, i32 1
+ %B23 = fdiv double %Sl17, 0.000000e+00
+ %Sl24 = select i1 %Cmp18, i32 420510, i32 492085
+ %Cmp25 = icmp ugt i1 %Cmp18, false
+ br i1 %Cmp25, label %CF80, label %CF83
+
+CF83: ; preds = %CF83, %CF80
+ %L26 = load i8* %0
+ store i8 -87, i8* %0
+ %E27 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
+ %Shuff28 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
+ %I29 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 492085, i32 1
+ %B30 = lshr <8 x i8> %I8, %I8
+ %FC31 = sitofp <4 x i32> %Shuff28 to <4 x double>
+ %Sl32 = select i1 false, <8 x i8> %I8, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %Cmp33 = icmp eq i64 %B, 116376
+ br i1 %Cmp33, label %CF83, label %CF88
+
+CF88: ; preds = %CF83
+ %L34 = load i8* %0
+ store i8 -87, i8* %0
+ %E35 = extractelement <8 x i64> %Shuff, i32 7
+ %Shuff36 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %Shuff28, <4 x i32> <i32 2, i32 undef, i32 undef, i32 0>
+ %I37 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 0
+ %B38 = xor <8 x i64> %B9, %B9
+ %ZE = zext i32 0 to i64
+ %Sl39 = select i1 %Cmp33, i8 %L11, i8 %L5
+ %Cmp40 = icmp sgt i1 %Cmp, false
+ br i1 %Cmp40, label %CF80, label %CF81
+
+CF81: ; preds = %CF81, %CF85, %CF87, %CF88
+ %L41 = load i8* %0
+ store i8 %L34, i8* %0
+ %E42 = extractelement <8 x i64> %Shuff13, i32 6
+ %Shuff43 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 7>
+ %I44 = insertelement <4 x i64> zeroinitializer, i64 116376, i32 3
+ %B45 = fsub float %FC16, 0x3AC86DCC40000000
+ %Tr = trunc <4 x i64> %I14 to <4 x i32>
+ %Sl46 = select i1 false, <8 x i64> %B38, <8 x i64> zeroinitializer
+ %Cmp47 = icmp sgt i1 %Cmp18, %Cmp18
+ br i1 %Cmp47, label %CF81, label %CF85
+
+CF85: ; preds = %CF81
+ %L48 = load i8* %0
+ store i8 -101, i8* %0
+ %E49 = extractelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i32 2
+ %Shuff50 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
+ %I51 = insertelement <4 x i64> zeroinitializer, i64 %E20, i32 3
+ %B52 = or i32 336955, %Sl24
+ %FC53 = uitofp i8 %L48 to double
+ %Sl54 = select i1 %Cmp47, i32 %3, i32 %Sl24
+ %Cmp55 = icmp ne <8 x i64> %Shuff13, zeroinitializer
+ %L56 = load i8* %0
+ store i8 %L11, i8* %0
+ %E57 = extractelement <4 x i64> %Shuff21, i32 1
+ %Shuff58 = shufflevector <8 x i64> %Shuff, <8 x i64> zeroinitializer, <8 x i32> <i32 4, i32 6, i32 undef, i32 10, i32 12, i32 undef, i32 0, i32 2>
+ %I59 = insertelement <4 x i64> zeroinitializer, i64 %E42, i32 2
+ %B60 = udiv <8 x i8> %Sl, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %Tr61 = trunc i8 49 to i1
+ br i1 %Tr61, label %CF81, label %CF84
+
+CF84: ; preds = %CF84, %CF85
+ %Sl62 = select i1 false, i8 %L, i8 %L48
+ %Cmp63 = icmp ne <8 x i64> %I, zeroinitializer
+ %L64 = load i8* %0
+ store i8 %5, i8* %0
+ %E65 = extractelement <8 x i1> %Cmp55, i32 0
+ br i1 %E65, label %CF84, label %CF87
+
+CF87: ; preds = %CF84
+ %Shuff66 = shufflevector <4 x i64> %Shuff21, <4 x i64> %I14, <4 x i32> <i32 3, i32 undef, i32 7, i32 1>
+ %I67 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 %Sl54, i32 1
+ %B68 = frem double %B23, %Sl17
+ %ZE69 = zext <8 x i8> %Sl32 to <8 x i64>
+ %Sl70 = select i1 %Tr61, i64 %E20, i64 %E12
+ %Cmp71 = icmp slt <8 x i64> %I, %Shuff
+ %L72 = load i8* %0
+ store i8 %L72, i8* %0
+ %E73 = extractelement <8 x i1> %Cmp55, i32 6
+ br i1 %E73, label %CF81, label %CF82
+
+CF82: ; preds = %CF82, %CF87
+ %Shuff74 = shufflevector <4 x i32> %I67, <4 x i32> %I29, <4 x i32> <i32 1, i32 3, i32 undef, i32 7>
+ %I75 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 3
+ %B76 = fsub double 0.000000e+00, %FC53
+ %Tr77 = trunc i32 %E to i8
+ %Sl78 = select i1 %Cmp18, i64* %A2, i64* %2
+ %Cmp79 = icmp eq i32 394647, 492085
+ br i1 %Cmp79, label %CF82, label %CF86
+
+CF86: ; preds = %CF82
+ store i64 %Sl70, i64* %Sl78
+ store i64 %E57, i64* %Sl78
+ store i64 %Sl70, i64* %Sl78
+ store i64 %B, i64* %Sl78
+ store i64 %Sl10, i64* %Sl78
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll b/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
new file mode 100644
index 0000000..96547d9
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
@@ -0,0 +1,143 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; "Type for zero vector elements is not legal" assertion.
+; It should at least successfully build.
+
+define void @autogen_SD3926023935(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca i1
+ %A3 = alloca float
+ %A2 = alloca double
+ %A1 = alloca float
+ %A = alloca double
+ %L = load i8* %0
+ store i8 -123, i8* %0
+ %E = extractelement <4 x i64> zeroinitializer, i32 1
+ %Shuff = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %I = insertelement <2 x i1> zeroinitializer, i1 false, i32 0
+ %BC = bitcast i64 181325 to double
+ %Sl = select i1 false, <2 x i32> zeroinitializer, <2 x i32> zeroinitializer
+ %Cmp = icmp ne <4 x i64> zeroinitializer, zeroinitializer
+ %L5 = load i8* %0
+ store i8 %L, i8* %0
+ %E6 = extractelement <4 x i64> zeroinitializer, i32 3
+ %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 2, i32 0>
+ %I8 = insertelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i64 498254, i32 4
+ %B = shl i32 0, 364464
+ %Sl9 = select i1 false, i64 %E, i64 498254
+ %Cmp10 = icmp sge i8 -123, %5
+ br label %CF80
+
+CF80: ; preds = %BB
+ %L11 = load i8* %0
+ store i8 -123, i8* %0
+ %E12 = extractelement <2 x i16> zeroinitializer, i32 1
+ %Shuff13 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %I14 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B, i32 2
+ %B15 = sdiv i64 334618, -1
+ %PC = bitcast i1* %A4 to i64*
+ %Sl16 = select i1 %Cmp10, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ %Cmp17 = icmp ule <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %Sl16
+ %L18 = load double* %A2
+ store i64 498254, i64* %PC
+ %E19 = extractelement <4 x i64> zeroinitializer, i32 0
+ %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
+ %I21 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
+ %B22 = fadd double 0.000000e+00, %BC
+ %ZE = zext <2 x i1> %Shuff20 to <2 x i32>
+ %Sl23 = select i1 %Cmp10, <2 x i1> %Shuff20, <2 x i1> zeroinitializer
+ %Cmp24 = icmp ult <2 x i32> zeroinitializer, zeroinitializer
+ %L25 = load i8* %0
+ store i8 %L25, i8* %0
+ %E26 = extractelement <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, i32 3
+ %Shuff27 = shufflevector <4 x i32> %Shuff, <4 x i32> %I14, <4 x i32> <i32 6, i32 0, i32 undef, i32 4>
+ %I28 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 0
+ %B29 = lshr i8 %E26, -43
+ %Tr = trunc i8 %L5 to i1
+ br label %CF79
+
+CF79: ; preds = %CF80
+ %Sl30 = select i1 false, i8 %B29, i8 -123
+ %Cmp31 = icmp sge <2 x i1> %I, %I
+ %L32 = load i64* %PC
+ store i8 -123, i8* %0
+ %E33 = extractelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 2
+ %Shuff34 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
+ %I35 = insertelement <4 x i64> zeroinitializer, i64 498254, i32 3
+ %B36 = sub <8 x i64> %I8, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+ %PC37 = bitcast i8* %0 to i1*
+ %Sl38 = select i1 %Cmp10, i8 -43, i8 %L5
+ %Cmp39 = icmp eq i64 498254, %B15
+ br label %CF
+
+CF: ; preds = %CF, %CF79
+ %L40 = load double* %A
+ store i1 %Cmp39, i1* %PC37
+ %E41 = extractelement <4 x i64> zeroinitializer, i32 3
+ %Shuff42 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %ZE, <2 x i32> <i32 2, i32 undef>
+ %I43 = insertelement <4 x i32> %Shuff, i32 %3, i32 0
+ %B44 = shl i64 %E41, -1
+ %Se = sext <2 x i1> %I to <2 x i32>
+ %Sl45 = select i1 %Cmp10, i1 false, i1 false
+ br i1 %Sl45, label %CF, label %CF77
+
+CF77: ; preds = %CF77, %CF
+ %Cmp46 = fcmp uno double 0.000000e+00, 0.000000e+00
+ br i1 %Cmp46, label %CF77, label %CF78
+
+CF78: ; preds = %CF78, %CF83, %CF82, %CF77
+ %L47 = load i64* %PC
+ store i8 -123, i8* %0
+ %E48 = extractelement <4 x i64> zeroinitializer, i32 3
+ %Shuff49 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
+ %I50 = insertelement <2 x i1> zeroinitializer, i1 %Cmp10, i32 0
+ %B51 = sdiv i64 %E19, 463132
+ %Tr52 = trunc i64 %E48 to i32
+ %Sl53 = select i1 %Tr, i1 %Cmp46, i1 %Cmp10
+ br i1 %Sl53, label %CF78, label %CF83
+
+CF83: ; preds = %CF78
+ %Cmp54 = fcmp uge double %L40, %L40
+ br i1 %Cmp54, label %CF78, label %CF82
+
+CF82: ; preds = %CF83
+ %L55 = load i64* %PC
+ store i64 %L32, i64* %PC
+ %E56 = extractelement <2 x i16> %Shuff7, i32 1
+ %Shuff57 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
+ %I58 = insertelement <2 x i32> %Sl, i32 %Tr52, i32 0
+ %B59 = or i32 %B, %3
+ %FC = sitofp i64 498254 to double
+ %Sl60 = select i1 false, i64 %E6, i64 -1
+ %Cmp61 = icmp sgt <4 x i32> %Shuff27, %I43
+ %L62 = load i64* %PC
+ store i64 %Sl9, i64* %PC
+ %E63 = extractelement <2 x i32> %ZE, i32 0
+ %Shuff64 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 1, i32 3, i32 undef, i32 7>
+ %I65 = insertelement <4 x i32> %Shuff, i32 %3, i32 3
+ %B66 = sub i64 %L47, 53612
+ %Tr67 = trunc i64 %4 to i32
+ %Sl68 = select i1 %Cmp39, i1 %Cmp39, i1 false
+ br i1 %Sl68, label %CF78, label %CF81
+
+CF81: ; preds = %CF82
+ %Cmp69 = icmp ne <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
+ %L70 = load i8* %0
+ store i64 %L55, i64* %PC
+ %E71 = extractelement <4 x i32> %Shuff49, i32 1
+ %Shuff72 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff34, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %I73 = insertelement <4 x i64> %Shuff64, i64 %E, i32 2
+ %B74 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
+ %Sl75 = select i1 %Sl68, i64 %B51, i64 %L55
+ %Cmp76 = icmp sgt <8 x i64> %B74, %B36
+ store i1 %Cmp39, i1* %PC37
+ store i64 %E41, i64* %PC
+ store i64 %L32, i64* %PC
+ store i64 %Sl75, i64* %2
+ store i64 %L32, i64* %PC
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll b/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
new file mode 100644
index 0000000..bef75f3
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
@@ -0,0 +1,152 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed to select instructions for extract_vector_elt for
+; v4f32 on MSA.
+; It should at least successfully build.
+
+define void @autogen_SD3997499501(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca <1 x double>
+ %A3 = alloca double
+ %A2 = alloca float
+ %A1 = alloca double
+ %A = alloca double
+ %L = load i8* %0
+ store i8 97, i8* %0
+ %E = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
+ %Shuff = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I = insertelement <4 x i64> zeroinitializer, i64 0, i32 3
+ %Tr = trunc <1 x i64> zeroinitializer to <1 x i8>
+ %Sl = select i1 false, double* %A1, double* %A
+ %Cmp = icmp ne <2 x i64> zeroinitializer, zeroinitializer
+ %L5 = load double* %Sl
+ store float -4.374162e+06, float* %A2
+ %E6 = extractelement <4 x i64> zeroinitializer, i32 3
+ %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
+ %I8 = insertelement <2 x i1> %Shuff, i1 false, i32 0
+ %B = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %PC = bitcast float* %A2 to float*
+ %Sl9 = select i1 false, i32 82299, i32 0
+ %Cmp10 = icmp slt i8 97, %5
+ br label %CF72
+
+CF72: ; preds = %CF72, %CF80, %CF78, %BB
+ %L11 = load double* %Sl
+ store double 0.000000e+00, double* %Sl
+ %E12 = extractelement <2 x i1> zeroinitializer, i32 0
+ br i1 %E12, label %CF72, label %CF80
+
+CF80: ; preds = %CF72
+ %Shuff13 = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 3, i32 1>
+ %I14 = insertelement <2 x i64> zeroinitializer, i64 %4, i32 1
+ %B15 = fadd double %L5, 0.000000e+00
+ %BC = bitcast i32 0 to float
+ %Sl16 = select i1 %E12, float 0xC7957ED940000000, float %BC
+ %Cmp17 = icmp eq i32 136082, 471909
+ br i1 %Cmp17, label %CF72, label %CF77
+
+CF77: ; preds = %CF77, %CF80
+ %L18 = load double* %Sl
+ store double 0.000000e+00, double* %Sl
+ %E19 = extractelement <2 x i1> zeroinitializer, i32 0
+ br i1 %E19, label %CF77, label %CF78
+
+CF78: ; preds = %CF77
+ %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I21 = insertelement <8 x i1> zeroinitializer, i1 %Cmp10, i32 7
+ %B22 = sdiv <4 x i64> %Shuff7, zeroinitializer
+ %FC = uitofp i8 97 to double
+ %Sl23 = select i1 %Cmp10, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
+ %L24 = load double* %Sl
+ store float %Sl16, float* %PC
+ %E25 = extractelement <2 x i1> %Shuff, i32 1
+ br i1 %E25, label %CF72, label %CF76
+
+CF76: ; preds = %CF78
+ %Shuff26 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %B22, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
+ %I27 = insertelement <4 x i64> zeroinitializer, i64 %E, i32 2
+ %B28 = mul <4 x i64> %I27, zeroinitializer
+ %ZE = zext <8 x i1> zeroinitializer to <8 x i64>
+ %Sl29 = select i1 %Cmp17, float -4.374162e+06, float -4.374162e+06
+ %L30 = load i8* %0
+ store double %L5, double* %Sl
+ %E31 = extractelement <8 x i1> zeroinitializer, i32 5
+ br label %CF
+
+CF: ; preds = %CF, %CF81, %CF76
+ %Shuff32 = shufflevector <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <16 x i32> <i32 8, i32 undef, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 undef, i32 26, i32 28, i32 30, i32 undef, i32 2, i32 4, i32 6>
+ %I33 = insertelement <8 x i1> zeroinitializer, i1 false, i32 2
+ %BC34 = bitcast <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1> to <4 x float>
+ %Sl35 = select i1 %E12, <2 x i1> %I8, <2 x i1> zeroinitializer
+ %Cmp36 = fcmp oge double 0xC2C3BAE2D5C18360, 0xC2C3BAE2D5C18360
+ br i1 %Cmp36, label %CF, label %CF74
+
+CF74: ; preds = %CF74, %CF
+ %L37 = load float* %PC
+ store double 0.000000e+00, double* %Sl
+ %E38 = extractelement <2 x i1> %Sl23, i32 1
+ br i1 %E38, label %CF74, label %CF75
+
+CF75: ; preds = %CF75, %CF82, %CF74
+ %Shuff39 = shufflevector <2 x i1> %Shuff13, <2 x i1> zeroinitializer, <2 x i32> <i32 undef, i32 2>
+ %I40 = insertelement <4 x i64> zeroinitializer, i64 %4, i32 2
+ %Sl41 = select i1 %Cmp10, i32 0, i32 %3
+ %Cmp42 = icmp ne <1 x i64> zeroinitializer, zeroinitializer
+ %L43 = load double* %Sl
+ store i64 %4, i64* %2
+ %E44 = extractelement <2 x i1> %Shuff20, i32 1
+ br i1 %E44, label %CF75, label %CF82
+
+CF82: ; preds = %CF75
+ %Shuff45 = shufflevector <2 x i1> %Sl23, <2 x i1> %Sl23, <2 x i32> <i32 2, i32 0>
+ %I46 = insertelement <4 x i64> zeroinitializer, i64 0, i32 0
+ %B47 = sub i64 %E, %E6
+ %Sl48 = select i1 %Cmp10, double %L5, double %L43
+ %Cmp49 = icmp uge i64 %4, %B47
+ br i1 %Cmp49, label %CF75, label %CF81
+
+CF81: ; preds = %CF82
+ %L50 = load i8* %0
+ store double %L43, double* %Sl
+ %E51 = extractelement <4 x i64> %Shuff7, i32 3
+ %Shuff52 = shufflevector <4 x float> %BC34, <4 x float> %BC34, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
+ %I53 = insertelement <2 x i1> %Cmp, i1 %E25, i32 0
+ %B54 = fdiv double %L24, %L43
+ %BC55 = bitcast <4 x i64> zeroinitializer to <4 x double>
+ %Sl56 = select i1 false, i8 %5, i8 97
+ %L57 = load i8* %0
+ store i8 %L50, i8* %0
+ %E58 = extractelement <2 x i1> %Shuff20, i32 1
+ br i1 %E58, label %CF, label %CF73
+
+CF73: ; preds = %CF73, %CF81
+ %Shuff59 = shufflevector <2 x i1> %Shuff13, <2 x i1> %Shuff45, <2 x i32> <i32 undef, i32 0>
+ %I60 = insertelement <4 x float> %Shuff52, float -4.374162e+06, i32 0
+ %B61 = mul <4 x i64> %I46, zeroinitializer
+ %PC62 = bitcast double* %A3 to float*
+ %Sl63 = select i1 %Cmp10, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer
+ %Cmp64 = icmp ne <2 x i1> %Cmp, %Shuff
+ %L65 = load double* %A1
+ store float -4.374162e+06, float* %PC62
+ %E66 = extractelement <8 x i1> %I21, i32 3
+ br i1 %E66, label %CF73, label %CF79
+
+CF79: ; preds = %CF79, %CF73
+ %Shuff67 = shufflevector <8 x i1> %I21, <8 x i1> %I21, <8 x i32> <i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 undef, i32 4>
+ %I68 = insertelement <1 x i1> %Cmp42, i1 %E25, i32 0
+ %B69 = sdiv <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+ %Sl70 = select i1 %Cmp49, <2 x i1> %Sl23, <2 x i1> %Shuff45
+ %Cmp71 = icmp ne i1 false, false
+ br i1 %Cmp71, label %CF79, label %CF83
+
+CF83: ; preds = %CF79
+ store double 0.000000e+00, double* %Sl
+ store float %BC, float* %PC62
+ store double %Sl48, double* %Sl
+ store double %FC, double* %Sl
+ store float %BC, float* %PC62
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll
new file mode 100644
index 0000000..24e27cb
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test is based on an llvm-stress generated test case with seed=449609655
+
+; This test originally failed for MSA with a
+; "Comparison requires equal bit widths" assertion.
+; The legalizer legalized the <4 x i8>'s into <4 x i32>'s, then a call to
+; isVSplat() returned the splat value for <i8 -1, i8 -1, ...> as a 32-bit
+; APInt (255), but the zeroinitializer splat value as an 8-bit APInt (0). The
+; assertion occurred when trying to check that the values were bitwise
+; inverses of each other.
+;
+; It should at least successfully build.
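+;
+; As a worked example of the mismatch: the all-ones <4 x i8> constant, once
+; widened to <4 x i32>, reports a 32-bit splat value of 255, while
+; zeroinitializer still reports an 8-bit splat value of 0. APInt comparisons
+; assert that both operands have the same bit width, hence the failure when
+; checking whether the two splat values are bitwise inverses.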
+
+define void @autogen_SD449609655(i8) {
+BB:
+ %Cmp = icmp ult i8 -3, %0
+ br label %CF78
+
+CF78: ; preds = %CF81, %CF78, %BB
+ %Sl31 = select i1 %Cmp, <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8> zeroinitializer
+ br i1 undef, label %CF78, label %CF81
+
+CF81: ; preds = %CF78
+ br i1 undef, label %CF78, label %CF80
+
+CF80: ; preds = %CF81
+ %I59 = insertelement <4 x i8> %Sl31, i8 undef, i32 1
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll b/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
new file mode 100644
index 0000000..697871d
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
@@ -0,0 +1,139 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; `Num < NumOperands && "Invalid child # of SDNode!"' assertion.
+; It should at least successfully build.
+
+define void @autogen_SD525530439(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca i32
+ %A3 = alloca double
+ %A2 = alloca <1 x double>
+ %A1 = alloca <8 x double>
+ %A = alloca i64
+ %L = load i8* %0
+ store i64 33695, i64* %A
+ %E = extractelement <4 x i32> zeroinitializer, i32 3
+ %Shuff = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 2, i32 0>
+ %I = insertelement <4 x i16> zeroinitializer, i16 -11642, i32 0
+ %B = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %ZE = fpext float 0x3B64A2B880000000 to double
+ %Sl = select i1 true, i16 -1, i16 -11642
+ %L5 = load i8* %0
+ store i8 0, i8* %0
+ %E6 = extractelement <4 x i32> zeroinitializer, i32 2
+ %Shuff7 = shufflevector <8 x i1> zeroinitializer, <8 x i1> zeroinitializer, <8 x i32> <i32 undef, i32 7, i32 9, i32 11, i32 13, i32 15, i32 1, i32 undef>
+ %I8 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3
+ %B9 = sub i32 71140, 439732
+ %BC = bitcast <2 x i32> <i32 -1, i32 -1> to <2 x float>
+ %Sl10 = select i1 true, i32* %1, i32* %1
+ %Cmp = icmp sge <8 x i64> zeroinitializer, zeroinitializer
+ %L11 = load i32* %Sl10
+ store <1 x double> zeroinitializer, <1 x double>* %A2
+ %E12 = extractelement <4 x i16> zeroinitializer, i32 0
+ %Shuff13 = shufflevector <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i32> undef
+ %I14 = insertelement <1 x i16> zeroinitializer, i16 %Sl, i32 0
+ %B15 = or i16 -1, %E12
+ %BC16 = bitcast <4 x i32> zeroinitializer to <4 x float>
+ %Sl17 = select i1 true, i64 %4, i64 %4
+ %Cmp18 = fcmp ugt float 0xC5ABB1BF80000000, 0x3EEF3D6300000000
+ br label %CF75
+
+CF75: ; preds = %CF75, %BB
+ %L19 = load i32* %Sl10
+ store i32 %L11, i32* %Sl10
+ %E20 = extractelement <4 x i32> zeroinitializer, i32 1
+ %Shuff21 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %I8, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
+ %I22 = insertelement <4 x float> %BC16, float 0x3EEF3D6300000000, i32 2
+ %B23 = shl i32 71140, 439732
+ %ZE24 = fpext <4 x float> %I22 to <4 x double>
+ %Sl25 = select i1 %Cmp18, i32 %L11, i32 %L11
+ %Cmp26 = icmp ne i32 %E20, %L19
+ br i1 %Cmp26, label %CF75, label %CF76
+
+CF76: ; preds = %CF75
+ %L27 = load i32* %Sl10
+ store i32 439732, i32* %Sl10
+ %E28 = extractelement <4 x i32> %Shuff21, i32 3
+ %Shuff29 = shufflevector <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0>
+ %I30 = insertelement <8 x i1> %Shuff7, i1 %Cmp18, i32 4
+ %Sl31 = select i1 %Cmp18, i32 %3, i32 %B23
+ %Cmp32 = icmp ugt i32 0, %3
+ br label %CF74
+
+CF74: ; preds = %CF74, %CF80, %CF78, %CF76
+ %L33 = load i64* %2
+ store i32 71140, i32* %Sl10
+ %E34 = extractelement <4 x i32> zeroinitializer, i32 1
+ %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> undef
+ %I36 = insertelement <4 x i16> zeroinitializer, i16 -11642, i32 0
+ %B37 = mul <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %Shuff29
+ %Sl38 = select i1 %Cmp18, double 0.000000e+00, double 0x2BA9DB480DA732C6
+ %Cmp39 = icmp sgt i16 -11642, %Sl
+ br i1 %Cmp39, label %CF74, label %CF80
+
+CF80: ; preds = %CF74
+ %L40 = load i8* %0
+ store i32 0, i32* %Sl10
+ %E41 = extractelement <8 x i64> zeroinitializer, i32 1
+ %Shuff42 = shufflevector <1 x i16> %I14, <1 x i16> %I14, <1 x i32> undef
+ %I43 = insertelement <4 x i16> %I36, i16 -11642, i32 0
+ %FC = fptoui float 0x455CA2B080000000 to i16
+ %Sl44 = select i1 %Cmp18, i1 %Cmp18, i1 %Cmp39
+ br i1 %Sl44, label %CF74, label %CF78
+
+CF78: ; preds = %CF80
+ %L45 = load i32* %Sl10
+ store i8 %L5, i8* %0
+ %E46 = extractelement <8 x i1> %Shuff7, i32 2
+ br i1 %E46, label %CF74, label %CF77
+
+CF77: ; preds = %CF77, %CF78
+ %Shuff47 = shufflevector <4 x i16> %I43, <4 x i16> zeroinitializer, <4 x i32> <i32 5, i32 undef, i32 1, i32 3>
+ %I48 = insertelement <1 x i16> %Shuff42, i16 %Sl, i32 0
+ %B49 = mul i8 0, %L40
+ %FC50 = uitofp i32 %3 to double
+ %Sl51 = select i1 %Sl44, i32 %L27, i32 0
+ %Cmp52 = icmp sge i8 %B49, 0
+ br i1 %Cmp52, label %CF77, label %CF79
+
+CF79: ; preds = %CF77
+ %L53 = load i32* %Sl10
+ store i8 %L40, i8* %0
+ %E54 = extractelement <4 x i32> zeroinitializer, i32 1
+ %Shuff55 = shufflevector <4 x i32> %Shuff21, <4 x i32> %I8, <4 x i32> <i32 4, i32 6, i32 undef, i32 2>
+ %I56 = insertelement <4 x i32> zeroinitializer, i32 %Sl51, i32 2
+ %Tr = trunc <1 x i64> %Shuff13 to <1 x i16>
+ %Sl57 = select i1 %Cmp18, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>
+ %Cmp58 = icmp uge <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %I56
+ %L59 = load i8* %0
+ store <1 x double> zeroinitializer, <1 x double>* %A2
+ %E60 = extractelement <4 x i32> zeroinitializer, i32 0
+ %Shuff61 = shufflevector <4 x i32> %I8, <4 x i32> %I8, <4 x i32> <i32 undef, i32 1, i32 undef, i32 undef>
+ %I62 = insertelement <4 x i16> zeroinitializer, i16 %E12, i32 1
+ %B63 = and <4 x i32> %Shuff61, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %PC = bitcast double* %A3 to i32*
+ %Sl64 = select i1 %Cmp18, <4 x i32> %Shuff61, <4 x i32> %Shuff55
+ %Cmp65 = icmp sgt i32 439732, %3
+ br label %CF
+
+CF: ; preds = %CF79
+ %L66 = load i32* %Sl10
+ store i32 %E6, i32* %PC
+ %E67 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2
+ %Shuff68 = shufflevector <4 x i32> %Sl64, <4 x i32> %I8, <4 x i32> <i32 5, i32 undef, i32 1, i32 undef>
+ %I69 = insertelement <4 x i16> %Shuff47, i16 %Sl, i32 3
+ %B70 = sdiv <4 x i64> zeroinitializer, zeroinitializer
+ %FC71 = sitofp i32 %L66 to double
+ %Sl72 = select i1 %Cmp18, i64 %4, i64 %4
+ %Cmp73 = icmp eq <4 x i64> zeroinitializer, %B70
+ store i32 %B23, i32* %PC
+ store i32 %3, i32* %PC
+ store i32 %3, i32* %Sl10
+ store i32 %L27, i32* %1
+ store i32 0, i32* %PC
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll b/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
new file mode 100644
index 0000000..dc4200a
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
@@ -0,0 +1,145 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed to select instructions for extract_vector_elt for
+; v2f64 on MSA.
+; It should at least successfully build.
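+; (Assumption: the <4 x double> operations below are split into v2f64 during
+; legalization, which exposed the missing pattern.)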
+
+define void @autogen_SD997348632(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca <2 x i32>
+ %A3 = alloca <16 x i16>
+ %A2 = alloca <4 x i1>
+ %A1 = alloca <4 x i16>
+ %A = alloca <2 x i32>
+ %L = load i8* %0
+ store i8 %L, i8* %0
+ %E = extractelement <4 x i32> zeroinitializer, i32 0
+ %Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 1, i32 3, i32 5>
+ %I = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
+ %FC = sitofp <4 x i32> zeroinitializer to <4 x double>
+ %Sl = select i1 false, <4 x i64> %Shuff, <4 x i64> %Shuff
+ %L5 = load i8* %0
+ store i8 %5, i8* %0
+ %E6 = extractelement <1 x i16> zeroinitializer, i32 0
+ %Shuff7 = shufflevector <2 x i1> %I, <2 x i1> %I, <2 x i32> <i32 1, i32 undef>
+ %I8 = insertelement <1 x i16> zeroinitializer, i16 0, i32 0
+ %B = xor i32 376034, %3
+ %FC9 = fptoui float 0x406DB70180000000 to i64
+ %Sl10 = select i1 false, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %Cmp = icmp ult <4 x i64> zeroinitializer, zeroinitializer
+ %L11 = load i8* %0
+ store i8 %L, i8* %0
+ %E12 = extractelement <4 x i64> zeroinitializer, i32 2
+ %Shuff13 = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 undef, i32 3>
+ %I14 = insertelement <8 x i32> zeroinitializer, i32 -1, i32 7
+ %B15 = fdiv <4 x double> %FC, %FC
+ %Tr = trunc i32 376034 to i16
+ %Sl16 = select i1 false, <8 x i32> %Sl10, <8 x i32> zeroinitializer
+ %Cmp17 = icmp uge i32 233658, %E
+ br label %CF
+
+CF: ; preds = %CF, %CF79, %CF84, %BB
+ %L18 = load i8* %0
+ store i8 %L, i8* %0
+ %E19 = extractelement <4 x i64> %Sl, i32 3
+ %Shuff20 = shufflevector <2 x i1> %Shuff7, <2 x i1> %I, <2 x i32> <i32 2, i32 0>
+ %I21 = insertelement <4 x i64> zeroinitializer, i64 %FC9, i32 0
+ %B22 = xor <8 x i32> %I14, %I14
+ %Tr23 = trunc i16 0 to i8
+ %Sl24 = select i1 false, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> zeroinitializer
+ %Cmp25 = icmp eq i1 false, false
+ br i1 %Cmp25, label %CF, label %CF79
+
+CF79: ; preds = %CF
+ %L26 = load i8* %0
+ store i8 %L26, i8* %0
+ %E27 = extractelement <1 x i16> zeroinitializer, i32 0
+ %Shuff28 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11>
+ %I29 = insertelement <16 x i32> %Shuff28, i32 %B, i32 15
+ %B30 = fdiv float 0.000000e+00, -6.749110e+06
+ %Sl31 = select i1 false, i32 %3, i32 %3
+ %Cmp32 = fcmp uno float 0.000000e+00, 0x406DB70180000000
+ br i1 %Cmp32, label %CF, label %CF78
+
+CF78: ; preds = %CF78, %CF79
+ %L33 = load i8* %0
+ store i8 %L, i8* %0
+ %E34 = extractelement <16 x i32> %Shuff28, i32 1
+ %Shuff35 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I21, <4 x i32> <i32 undef, i32 6, i32 0, i32 2>
+ %I36 = insertelement <4 x double> %FC, double 0xA4A57F449CA36CC2, i32 2
+ %Se = sext <4 x i1> %Cmp to <4 x i32>
+ %Sl37 = select i1 %Cmp17, i32 0, i32 0
+ %Cmp38 = icmp ne i32 440284, 376034
+ br i1 %Cmp38, label %CF78, label %CF80
+
+CF80: ; preds = %CF80, %CF82, %CF78
+ %L39 = load i8* %0
+ store i8 %L, i8* %0
+ %E40 = extractelement <2 x i1> %Shuff20, i32 1
+ br i1 %E40, label %CF80, label %CF82
+
+CF82: ; preds = %CF80
+ %Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff20, <2 x i32> <i32 2, i32 0>
+ %I42 = insertelement <2 x i1> %Shuff41, i1 false, i32 0
+ %B43 = sub i32 %E, 0
+ %Sl44 = select i1 %Cmp32, <16 x i32> %Shuff28, <16 x i32> %Shuff28
+ %Cmp45 = icmp sgt <4 x i64> zeroinitializer, %I21
+ %L46 = load i8* %0
+ store i8 %L11, i8* %0
+ %E47 = extractelement <8 x i32> %Sl16, i32 4
+ %Shuff48 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff7, <2 x i32> <i32 undef, i32 1>
+ %I49 = insertelement <2 x i1> %Shuff48, i1 %Cmp17, i32 1
+ %B50 = and <8 x i32> %I14, %Sl10
+ %FC51 = fptoui float -6.749110e+06 to i1
+ br i1 %FC51, label %CF80, label %CF81
+
+CF81: ; preds = %CF81, %CF82
+ %Sl52 = select i1 false, float -6.749110e+06, float 0x406DB70180000000
+ %Cmp53 = icmp uge <2 x i32> <i32 -1, i32 -1>, <i32 -1, i32 -1>
+ %L54 = load i8* %0
+ store i8 %L5, i8* %0
+ %E55 = extractelement <8 x i32> zeroinitializer, i32 7
+ %Shuff56 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 4, i32 6, i32 0>
+ %I57 = insertelement <2 x i1> %Shuff7, i1 false, i32 0
+ %B58 = fmul <4 x double> %FC, %FC
+ %FC59 = fptoui <4 x double> %I36 to <4 x i16>
+ %Sl60 = select i1 %Cmp17, <2 x i1> %I, <2 x i1> %I57
+ %Cmp61 = icmp ule <8 x i32> %B50, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %L62 = load i8* %0
+ store i8 %L33, i8* %0
+ %E63 = extractelement <4 x i64> %Shuff, i32 2
+ %Shuff64 = shufflevector <4 x i64> %Shuff56, <4 x i64> %Shuff56, <4 x i32> <i32 5, i32 7, i32 1, i32 undef>
+ %I65 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
+ %B66 = sdiv i32 %B, %E55
+ %Tr67 = trunc i8 %L54 to i1
+ br i1 %Tr67, label %CF81, label %CF83
+
+CF83: ; preds = %CF83, %CF81
+ %Sl68 = select i1 %Cmp17, i1 %Cmp25, i1 %Tr67
+ br i1 %Sl68, label %CF83, label %CF84
+
+CF84: ; preds = %CF83
+ %Cmp69 = icmp uge i32 %E, %E34
+ br i1 %Cmp69, label %CF, label %CF77
+
+CF77: ; preds = %CF84
+ %L70 = load i8* %0
+ store i8 %L, i8* %0
+ %E71 = extractelement <4 x i64> %Shuff, i32 0
+ %Shuff72 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
+ %I73 = insertelement <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B66, i32 1
+ %FC74 = uitofp i1 %Cmp32 to double
+ %Sl75 = select i1 %FC51, i16 9704, i16 0
+ %Cmp76 = icmp ugt <1 x i16> %I8, %I8
+ store i8 %L39, i8* %0
+ store i8 %5, i8* %0
+ store i8 %Tr23, i8* %0
+ store i8 %L, i8* %0
+ store i8 %5, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll b/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll
new file mode 100644
index 0000000..8c4fcba
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll
@@ -0,0 +1,25 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed to select code for a truncstore of a
+; build_vector.
+; It should at least successfully build.
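+; (Assumption: the <8 x i8> constant store below is what becomes the
+; truncating store of a build_vector.)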
+
+define void @autogen_SD742806235(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca double
+ %A3 = alloca double
+ %A2 = alloca <8 x i8>
+ %A1 = alloca <4 x float>
+ %A = alloca i1
+ store i8 %5, i8* %0
+ store i8 %5, i8* %0
+ store i8 %5, i8* %0
+ store <8 x i8> <i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1>, <8 x i8>* %A2
+ store i8 %5, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/shift-dagcombine.ll b/test/CodeGen/Mips/msa/shift-dagcombine.ll
new file mode 100644
index 0000000..0d809fb
--- /dev/null
+++ b/test/CodeGen/Mips/msa/shift-dagcombine.ll
@@ -0,0 +1,75 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
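+;
+; The shifts below are fully constant-folded, so no sra/srl/sll should appear.
+; The operands are chosen so each folded result is a splat that ldi.w can
+; materialize, except the unsigned shift of negative values, whose non-splat
+; result is loaded from the constant pool instead.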
+
+define void @ashr_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: ashr_v4i32:
+
+ %1 = ashr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sra
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
+ ; CHECK-NOT: sra
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ %2 = ashr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sra
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -2
+ ; CHECK-NOT: sra
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size ashr_v4i32
+}
+
+define void @lshr_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: lshr_v4i32:
+
+ %1 = lshr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: srl
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
+ ; CHECK-NOT: srl
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ %2 = lshr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: srl
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], %lo
+ ; CHECK-NOT: srl
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size lshr_v4i32
+}
+
+define void @shl_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: shl_v4i32:
+
+ %1 = shl <4 x i32> <i32 8, i32 4, i32 2, i32 1>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sll
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 8
+ ; CHECK-NOT: sll
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ %2 = shl <4 x i32> <i32 -8, i32 -4, i32 -2, i32 -1>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sll
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -8
+ ; CHECK-NOT: sll
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size shl_v4i32
+}
diff --git a/test/CodeGen/Mips/msa/shuffle.ll b/test/CodeGen/Mips/msa/shuffle.ll
new file mode 100644
index 0000000..316c669
--- /dev/null
+++ b/test/CodeGen/Mips/msa/shuffle.ll
@@ -0,0 +1,817 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
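+;
+; Note: vshf.df takes its control vector in the destination register, so these
+; tests expect the mask to be loaded from the constant pool into the result
+; register (e.g. ld.b $wD, %lo(...)) before the vshf.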
+
+define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_0
+}
+
+define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_1:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_1
+}
+
+define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_2:
+
+ %1 = load <16 x i8>* %a
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16>
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_2
+}
+
+define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_3:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_3
+}
+
+define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_4:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17>
+ ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_4
+}
+
+define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_0
+}
+
+define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_1:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_1
+}
+
+define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_2:
+
+ %1 = load <8 x i16>* %a
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8>
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_2
+}
+
+define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_3:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_3
+}
+
+define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_4:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> <i32 1, i32 9, i32 1, i32 9, i32 1, i32 9, i32 1, i32 9>
+ ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_4
+}
+
+; Note: shf.df operates on 4-element sets and v4i32 has only one such set, so
+; any single-vector v4i32 shuffle fits in the immediate and it is impossible
+; to get a vshf.w instruction when using a single vector.
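+; The shf.df immediate packs four 2-bit source indices: result element i comes
+; from source element (imm >> (2*i)) & 3, so <3,2,1,0> encodes as 27 and a
+; splat of element 1 as 85.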
+
+define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_0
+}
+
+define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_1:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_1
+}
+
+define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_2:
+
+ %1 = load <4 x i32>* %a
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 5, i32 6, i32 4>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R2]], 36
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_2
+}
+
+define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_3:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.w [[R3]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_3
+}
+
+define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_4:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 5, i32 5, i32 1>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_4
+}
+
+define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_0
+}
+
+define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_1:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_1
+}
+
+define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_2:
+
+ %1 = load <2 x i64>* %a
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2>
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_2
+}
+
+define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_3:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2>
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_3
+}
+
+define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_4:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> <i32 1, i32 3>
+ ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_4
+}
+
+define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: shf_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 2, i32 0, i32 5, i32 7, i32 6, i32 4, i32 9, i32 11, i32 10, i32 8, i32 13, i32 15, i32 14, i32 12>
+ ; CHECK-DAG: shf.b [[R3:\$w[0-9]+]], [[R1]], 45
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size shf_v16i8_0
+}
+
+define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: shf_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ; CHECK-DAG: shf.h [[R3:\$w[0-9]+]], [[R1]], 27
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size shf_v8i16_0
+}
+
+define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: shf_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size shf_v4i32_0
+}
+
+; shf.d does not exist
+
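+; ilvev.df interleaves the even-indexed elements of the two inputs and ilvod.df
+; the odd-indexed ones; ilvl.df and ilvr.df interleave the left and right
+; halves of the inputs respectively.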
+define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ilvev_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ; CHECK-DAG: ilvev.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvev_v16i8_0
+}
+
+define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ilvev_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvev_v8i16_0
+}
+
+define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ilvev_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvev_v4i32_0
+}
+
+define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ilvev_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
+ ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvev_v2i64_0
+}
+
+define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ilvod_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ; CHECK-DAG: ilvod.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvod_v16i8_0
+}
+
+define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ilvod_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvod_v8i16_0
+}
+
+define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ilvod_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvod_v4i32_0
+}
+
+define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ilvod_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
+ ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvod_v2i64_0
+}
+
+define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ilvl_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ; CHECK-DAG: ilvl.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvl_v16i8_0
+}
+
+define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ilvl_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvl_v8i16_0
+}
+
+define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ilvl_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvl_v4i32_0
+}
+
+define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ilvl_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
+ ; ilvl.d and ilvev.d are equivalent for v2i64
+ ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvl_v2i64_0
+}
+
+define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ilvr_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ; CHECK-DAG: ilvr.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvr_v16i8_0
+}
+
+define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ilvr_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvr_v8i16_0
+}
+
+define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ilvr_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvr_v4i32_0
+}
+
+define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ilvr_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
+ ; ilvr.d and ilvod.d are equivalent for v2i64
+ ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvr_v2i64_0
+}
+
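+; pckev.df packs the even-indexed elements of both inputs into the result;
+; pckod.df packs the odd-indexed ones.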
+define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: pckev_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ; CHECK-DAG: pckev.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckev_v16i8_0
+}
+
+define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: pckev_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckev_v8i16_0
+}
+
+define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: pckev_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckev_v4i32_0
+}
+
+define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: pckev_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
+ ; pckev.d and ilvev.d are equivalent for v2i64
+ ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckev_v2i64_0
+}
+
+define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: pckod_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ; CHECK-DAG: pckod.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckod_v16i8_0
+}
+
+define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: pckod_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckod_v8i16_0
+}
+
+define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: pckod_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckod_v4i32_0
+}
+
+define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: pckod_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
+ ; pckod.d and ilvod.d are equivalent for v2i64
+ ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckod_v2i64_0
+}
+
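+; splati.df replicates the element selected by the immediate across the result.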
+define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: splati_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef,
+ <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][4]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size splati_v16i8_0
+}
+
+define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: splati_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][4]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size splati_v8i16_0
+}
+
+define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: splati_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ ; shf.w and splati.w are equivalent
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 255
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size splati_v4i32_0
+}
+
+define void @splati_v2i64_0(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: splati_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size splati_v2i64_0
+}
diff --git a/test/CodeGen/Mips/msa/special.ll b/test/CodeGen/Mips/msa/special.ll
new file mode 100644
index 0000000..60a4369
--- /dev/null
+++ b/test/CodeGen/Mips/msa/special.ll
@@ -0,0 +1,28 @@
+; Test the MSA intrinsics that are encoded with the SPECIAL instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define i32 @llvm_mips_lsa_test(i32 %a, i32 %b) nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.lsa(i32 %a, i32 %b, i32 2)
+ ret i32 %0
+}
+
+declare i32 @llvm.mips.lsa(i32, i32, i32) nounwind
+
+; CHECK: llvm_mips_lsa_test:
+; CHECK: lsa {{\$[0-9]+}}, {{\$[0-9]+}}, {{\$[0-9]+}}, 2
+; CHECK: .size llvm_mips_lsa_test
+
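+; The shl+add pattern below should also select lsa (an add with one operand
+; shifted left by the immediate), without going through the intrinsic.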
+define i32 @lsa_test(i32 %a, i32 %b) nounwind {
+entry:
+ %0 = shl i32 %b, 2
+ %1 = add i32 %a, %0
+ ret i32 %1
+}
+
+; CHECK: lsa_test:
+; CHECK: lsa {{\$[0-9]+}}, {{\$[0-9]+}}, {{\$[0-9]+}}, 2
+; CHECK: .size lsa_test
diff --git a/test/CodeGen/Mips/msa/spill.ll b/test/CodeGen/Mips/msa/spill.ll
new file mode 100644
index 0000000..66f896a
--- /dev/null
+++ b/test/CodeGen/Mips/msa/spill.ll
@@ -0,0 +1,603 @@
+; Test that the correct instruction is chosen for spill and reload by trying
+; to have 33 live MSA registers simultaneously.
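+; (MSA has 32 vector registers, $w0-$w31, so 33 simultaneously live values
+; force at least one spill and reload.)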
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define i32 @test_i8(<16 x i8>* %p0, <16 x i8>* %q1) nounwind {
+entry:
+ %p1 = getelementptr <16 x i8>* %p0, i32 1
+ %p2 = getelementptr <16 x i8>* %p0, i32 2
+ %p3 = getelementptr <16 x i8>* %p0, i32 3
+ %p4 = getelementptr <16 x i8>* %p0, i32 4
+ %p5 = getelementptr <16 x i8>* %p0, i32 5
+ %p6 = getelementptr <16 x i8>* %p0, i32 6
+ %p7 = getelementptr <16 x i8>* %p0, i32 7
+ %p8 = getelementptr <16 x i8>* %p0, i32 8
+ %p9 = getelementptr <16 x i8>* %p0, i32 9
+ %p10 = getelementptr <16 x i8>* %p0, i32 10
+ %p11 = getelementptr <16 x i8>* %p0, i32 11
+ %p12 = getelementptr <16 x i8>* %p0, i32 12
+ %p13 = getelementptr <16 x i8>* %p0, i32 13
+ %p14 = getelementptr <16 x i8>* %p0, i32 14
+ %p15 = getelementptr <16 x i8>* %p0, i32 15
+ %p16 = getelementptr <16 x i8>* %p0, i32 16
+ %p17 = getelementptr <16 x i8>* %p0, i32 17
+ %p18 = getelementptr <16 x i8>* %p0, i32 18
+ %p19 = getelementptr <16 x i8>* %p0, i32 19
+ %p20 = getelementptr <16 x i8>* %p0, i32 20
+ %p21 = getelementptr <16 x i8>* %p0, i32 21
+ %p22 = getelementptr <16 x i8>* %p0, i32 22
+ %p23 = getelementptr <16 x i8>* %p0, i32 23
+ %p24 = getelementptr <16 x i8>* %p0, i32 24
+ %p25 = getelementptr <16 x i8>* %p0, i32 25
+ %p26 = getelementptr <16 x i8>* %p0, i32 26
+ %p27 = getelementptr <16 x i8>* %p0, i32 27
+ %p28 = getelementptr <16 x i8>* %p0, i32 28
+ %p29 = getelementptr <16 x i8>* %p0, i32 29
+ %p30 = getelementptr <16 x i8>* %p0, i32 30
+ %p31 = getelementptr <16 x i8>* %p0, i32 31
+ %p32 = getelementptr <16 x i8>* %p0, i32 32
+ %p33 = getelementptr <16 x i8>* %p0, i32 33
+ %0 = load <16 x i8>* %p0, align 16
+ %1 = load <16 x i8>* %p1, align 16
+ %2 = load <16 x i8>* %p2, align 16
+ %3 = load <16 x i8>* %p3, align 16
+ %4 = load <16 x i8>* %p4, align 16
+ %5 = load <16 x i8>* %p5, align 16
+ %6 = load <16 x i8>* %p6, align 16
+ %7 = load <16 x i8>* %p7, align 16
+ %8 = load <16 x i8>* %p8, align 16
+ %9 = load <16 x i8>* %p9, align 16
+ %10 = load <16 x i8>* %p10, align 16
+ %11 = load <16 x i8>* %p11, align 16
+ %12 = load <16 x i8>* %p12, align 16
+ %13 = load <16 x i8>* %p13, align 16
+ %14 = load <16 x i8>* %p14, align 16
+ %15 = load <16 x i8>* %p15, align 16
+ %16 = load <16 x i8>* %p16, align 16
+ %17 = load <16 x i8>* %p17, align 16
+ %18 = load <16 x i8>* %p18, align 16
+ %19 = load <16 x i8>* %p19, align 16
+ %20 = load <16 x i8>* %p20, align 16
+ %21 = load <16 x i8>* %p21, align 16
+ %22 = load <16 x i8>* %p22, align 16
+ %23 = load <16 x i8>* %p23, align 16
+ %24 = load <16 x i8>* %p24, align 16
+ %25 = load <16 x i8>* %p25, align 16
+ %26 = load <16 x i8>* %p26, align 16
+ %27 = load <16 x i8>* %p27, align 16
+ %28 = load <16 x i8>* %p28, align 16
+ %29 = load <16 x i8>* %p29, align 16
+ %30 = load <16 x i8>* %p30, align 16
+ %31 = load <16 x i8>* %p31, align 16
+ %32 = load <16 x i8>* %p32, align 16
+ %33 = load <16 x i8>* %p33, align 16
+ %r1 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
+ %r2 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r1, <16 x i8> %2)
+ %r3 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r2, <16 x i8> %3)
+ %r4 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r3, <16 x i8> %4)
+ %r5 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r4, <16 x i8> %5)
+ %r6 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r5, <16 x i8> %6)
+ %r7 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r6, <16 x i8> %7)
+ %r8 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r7, <16 x i8> %8)
+ %r9 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r8, <16 x i8> %9)
+ %r10 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r9, <16 x i8> %10)
+ %r11 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r10, <16 x i8> %11)
+ %r12 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r11, <16 x i8> %12)
+ %r13 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r12, <16 x i8> %13)
+ %r14 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r13, <16 x i8> %14)
+ %r15 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r14, <16 x i8> %15)
+ %r16 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r15, <16 x i8> %16)
+ %r17 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r16, <16 x i8> %17)
+ %r18 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r17, <16 x i8> %18)
+ %r19 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r18, <16 x i8> %19)
+ %r20 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r19, <16 x i8> %20)
+ %r21 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r20, <16 x i8> %21)
+ %r22 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r21, <16 x i8> %22)
+ %r23 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r22, <16 x i8> %23)
+ %r24 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r23, <16 x i8> %24)
+ %r25 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r24, <16 x i8> %25)
+ %r26 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r25, <16 x i8> %26)
+ %r27 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r26, <16 x i8> %27)
+ %r28 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r27, <16 x i8> %28)
+ %r29 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r28, <16 x i8> %29)
+ %r30 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r29, <16 x i8> %30)
+ %r31 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r30, <16 x i8> %31)
+ %r32 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r31, <16 x i8> %32)
+ %r33 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r32, <16 x i8> %33)
+ %rx1 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r33, <16 x i8> %1)
+ %rx2 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx1, <16 x i8> %2)
+ %rx3 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx2, <16 x i8> %3)
+ %rx4 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx3, <16 x i8> %4)
+ %rx5 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx4, <16 x i8> %5)
+ %rx6 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx5, <16 x i8> %6)
+ %rx7 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx6, <16 x i8> %7)
+ %rx8 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx7, <16 x i8> %8)
+ %rx9 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx8, <16 x i8> %9)
+ %rx10 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx9, <16 x i8> %10)
+ %rx11 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx10, <16 x i8> %11)
+ %rx12 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx11, <16 x i8> %12)
+ %rx13 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx12, <16 x i8> %13)
+ %rx14 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx13, <16 x i8> %14)
+ %rx15 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx14, <16 x i8> %15)
+ %rx16 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx15, <16 x i8> %16)
+ %rx17 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx16, <16 x i8> %17)
+ %rx18 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx17, <16 x i8> %18)
+ %rx19 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx18, <16 x i8> %19)
+ %rx20 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx19, <16 x i8> %20)
+ %rx21 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx20, <16 x i8> %21)
+ %rx22 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx21, <16 x i8> %22)
+ %rx23 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx22, <16 x i8> %23)
+ %rx24 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx23, <16 x i8> %24)
+ %rx25 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx24, <16 x i8> %25)
+ %rx26 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx25, <16 x i8> %26)
+ %rx27 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx26, <16 x i8> %27)
+ %rx28 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx27, <16 x i8> %28)
+ %rx29 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx28, <16 x i8> %29)
+ %rx30 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx29, <16 x i8> %30)
+ %rx31 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx30, <16 x i8> %31)
+ %rx32 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx31, <16 x i8> %32)
+ %rx33 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx32, <16 x i8> %33)
+ %res = call i32 @llvm.mips.copy.s.b(<16 x i8> %rx33, i32 0)
+ ret i32 %res
+}
+
+declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind
+declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: test_i8:
+; CHECK: st.b {{.*}} Spill
+; CHECK: st.b {{.*}} Spill
+; CHECK: ld.b {{.*}} Reload
+; CHECK: ld.b {{.*}} Reload
+; CHECK: .size
+
+define i32 @test_i16(<8 x i16>* %p0, <8 x i16>* %q1) nounwind {
+entry:
+ %p1 = getelementptr <8 x i16>* %p0, i32 1
+ %p2 = getelementptr <8 x i16>* %p0, i32 2
+ %p3 = getelementptr <8 x i16>* %p0, i32 3
+ %p4 = getelementptr <8 x i16>* %p0, i32 4
+ %p5 = getelementptr <8 x i16>* %p0, i32 5
+ %p6 = getelementptr <8 x i16>* %p0, i32 6
+ %p7 = getelementptr <8 x i16>* %p0, i32 7
+ %p8 = getelementptr <8 x i16>* %p0, i32 8
+ %p9 = getelementptr <8 x i16>* %p0, i32 9
+ %p10 = getelementptr <8 x i16>* %p0, i32 10
+ %p11 = getelementptr <8 x i16>* %p0, i32 11
+ %p12 = getelementptr <8 x i16>* %p0, i32 12
+ %p13 = getelementptr <8 x i16>* %p0, i32 13
+ %p14 = getelementptr <8 x i16>* %p0, i32 14
+ %p15 = getelementptr <8 x i16>* %p0, i32 15
+ %p16 = getelementptr <8 x i16>* %p0, i32 16
+ %p17 = getelementptr <8 x i16>* %p0, i32 17
+ %p18 = getelementptr <8 x i16>* %p0, i32 18
+ %p19 = getelementptr <8 x i16>* %p0, i32 19
+ %p20 = getelementptr <8 x i16>* %p0, i32 20
+ %p21 = getelementptr <8 x i16>* %p0, i32 21
+ %p22 = getelementptr <8 x i16>* %p0, i32 22
+ %p23 = getelementptr <8 x i16>* %p0, i32 23
+ %p24 = getelementptr <8 x i16>* %p0, i32 24
+ %p25 = getelementptr <8 x i16>* %p0, i32 25
+ %p26 = getelementptr <8 x i16>* %p0, i32 26
+ %p27 = getelementptr <8 x i16>* %p0, i32 27
+ %p28 = getelementptr <8 x i16>* %p0, i32 28
+ %p29 = getelementptr <8 x i16>* %p0, i32 29
+ %p30 = getelementptr <8 x i16>* %p0, i32 30
+ %p31 = getelementptr <8 x i16>* %p0, i32 31
+ %p32 = getelementptr <8 x i16>* %p0, i32 32
+ %p33 = getelementptr <8 x i16>* %p0, i32 33
+ %0 = load <8 x i16>* %p0, align 16
+ %1 = load <8 x i16>* %p1, align 16
+ %2 = load <8 x i16>* %p2, align 16
+ %3 = load <8 x i16>* %p3, align 16
+ %4 = load <8 x i16>* %p4, align 16
+ %5 = load <8 x i16>* %p5, align 16
+ %6 = load <8 x i16>* %p6, align 16
+ %7 = load <8 x i16>* %p7, align 16
+ %8 = load <8 x i16>* %p8, align 16
+ %9 = load <8 x i16>* %p9, align 16
+ %10 = load <8 x i16>* %p10, align 16
+ %11 = load <8 x i16>* %p11, align 16
+ %12 = load <8 x i16>* %p12, align 16
+ %13 = load <8 x i16>* %p13, align 16
+ %14 = load <8 x i16>* %p14, align 16
+ %15 = load <8 x i16>* %p15, align 16
+ %16 = load <8 x i16>* %p16, align 16
+ %17 = load <8 x i16>* %p17, align 16
+ %18 = load <8 x i16>* %p18, align 16
+ %19 = load <8 x i16>* %p19, align 16
+ %20 = load <8 x i16>* %p20, align 16
+ %21 = load <8 x i16>* %p21, align 16
+ %22 = load <8 x i16>* %p22, align 16
+ %23 = load <8 x i16>* %p23, align 16
+ %24 = load <8 x i16>* %p24, align 16
+ %25 = load <8 x i16>* %p25, align 16
+ %26 = load <8 x i16>* %p26, align 16
+ %27 = load <8 x i16>* %p27, align 16
+ %28 = load <8 x i16>* %p28, align 16
+ %29 = load <8 x i16>* %p29, align 16
+ %30 = load <8 x i16>* %p30, align 16
+ %31 = load <8 x i16>* %p31, align 16
+ %32 = load <8 x i16>* %p32, align 16
+ %33 = load <8 x i16>* %p33, align 16
+ %r1 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
+ %r2 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r1, <8 x i16> %2)
+ %r3 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r2, <8 x i16> %3)
+ %r4 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r3, <8 x i16> %4)
+ %r5 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r4, <8 x i16> %5)
+ %r6 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r5, <8 x i16> %6)
+ %r7 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r6, <8 x i16> %7)
+ %r8 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r7, <8 x i16> %8)
+ %r9 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r8, <8 x i16> %9)
+ %r10 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r9, <8 x i16> %10)
+ %r11 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r10, <8 x i16> %11)
+ %r12 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r11, <8 x i16> %12)
+ %r13 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r12, <8 x i16> %13)
+ %r14 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r13, <8 x i16> %14)
+ %r15 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r14, <8 x i16> %15)
+ %r16 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r15, <8 x i16> %16)
+ %r17 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r16, <8 x i16> %17)
+ %r18 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r17, <8 x i16> %18)
+ %r19 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r18, <8 x i16> %19)
+ %r20 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r19, <8 x i16> %20)
+ %r21 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r20, <8 x i16> %21)
+ %r22 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r21, <8 x i16> %22)
+ %r23 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r22, <8 x i16> %23)
+ %r24 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r23, <8 x i16> %24)
+ %r25 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r24, <8 x i16> %25)
+ %r26 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r25, <8 x i16> %26)
+ %r27 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r26, <8 x i16> %27)
+ %r28 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r27, <8 x i16> %28)
+ %r29 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r28, <8 x i16> %29)
+ %r30 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r29, <8 x i16> %30)
+ %r31 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r30, <8 x i16> %31)
+ %r32 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r31, <8 x i16> %32)
+ %r33 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r32, <8 x i16> %33)
+ %rx1 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r33, <8 x i16> %1)
+ %rx2 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx1, <8 x i16> %2)
+ %rx3 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx2, <8 x i16> %3)
+ %rx4 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx3, <8 x i16> %4)
+ %rx5 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx4, <8 x i16> %5)
+ %rx6 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx5, <8 x i16> %6)
+ %rx7 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx6, <8 x i16> %7)
+ %rx8 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx7, <8 x i16> %8)
+ %rx9 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx8, <8 x i16> %9)
+ %rx10 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx9, <8 x i16> %10)
+ %rx11 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx10, <8 x i16> %11)
+ %rx12 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx11, <8 x i16> %12)
+ %rx13 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx12, <8 x i16> %13)
+ %rx14 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx13, <8 x i16> %14)
+ %rx15 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx14, <8 x i16> %15)
+ %rx16 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx15, <8 x i16> %16)
+ %rx17 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx16, <8 x i16> %17)
+ %rx18 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx17, <8 x i16> %18)
+ %rx19 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx18, <8 x i16> %19)
+ %rx20 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx19, <8 x i16> %20)
+ %rx21 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx20, <8 x i16> %21)
+ %rx22 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx21, <8 x i16> %22)
+ %rx23 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx22, <8 x i16> %23)
+ %rx24 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx23, <8 x i16> %24)
+ %rx25 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx24, <8 x i16> %25)
+ %rx26 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx25, <8 x i16> %26)
+ %rx27 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx26, <8 x i16> %27)
+ %rx28 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx27, <8 x i16> %28)
+ %rx29 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx28, <8 x i16> %29)
+ %rx30 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx29, <8 x i16> %30)
+ %rx31 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx30, <8 x i16> %31)
+ %rx32 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx31, <8 x i16> %32)
+ %rx33 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx32, <8 x i16> %33)
+ %res = call i32 @llvm.mips.copy.s.h(<8 x i16> %rx33, i32 0)
+ ret i32 %res
+}
+
+declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind
+declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: test_i16:
+; CHECK: st.h {{.*}} Spill
+; CHECK: st.h {{.*}} Spill
+; CHECK: ld.h {{.*}} Reload
+; CHECK: ld.h {{.*}} Reload
+; CHECK: .size
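+; Note on the structure of these tests (this reading is inferred from the
+; code, not stated by it): %1 through %33 are all reused by the second (%rx)
+; add chain, so at least 34 vector values are live at once -- more than the
+; 32 MSA registers $w0-$w31 -- which should force the register allocator to
+; spill. The checks only require two spill/reload pairs of the matching
+; element width rather than an exact count.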
+
+define i32 @test_i32(<4 x i32>* %p0, <4 x i32>* %q1) nounwind {
+entry:
+ %p1 = getelementptr <4 x i32>* %p0, i32 1
+ %p2 = getelementptr <4 x i32>* %p0, i32 2
+ %p3 = getelementptr <4 x i32>* %p0, i32 3
+ %p4 = getelementptr <4 x i32>* %p0, i32 4
+ %p5 = getelementptr <4 x i32>* %p0, i32 5
+ %p6 = getelementptr <4 x i32>* %p0, i32 6
+ %p7 = getelementptr <4 x i32>* %p0, i32 7
+ %p8 = getelementptr <4 x i32>* %p0, i32 8
+ %p9 = getelementptr <4 x i32>* %p0, i32 9
+ %p10 = getelementptr <4 x i32>* %p0, i32 10
+ %p11 = getelementptr <4 x i32>* %p0, i32 11
+ %p12 = getelementptr <4 x i32>* %p0, i32 12
+ %p13 = getelementptr <4 x i32>* %p0, i32 13
+ %p14 = getelementptr <4 x i32>* %p0, i32 14
+ %p15 = getelementptr <4 x i32>* %p0, i32 15
+ %p16 = getelementptr <4 x i32>* %p0, i32 16
+ %p17 = getelementptr <4 x i32>* %p0, i32 17
+ %p18 = getelementptr <4 x i32>* %p0, i32 18
+ %p19 = getelementptr <4 x i32>* %p0, i32 19
+ %p20 = getelementptr <4 x i32>* %p0, i32 20
+ %p21 = getelementptr <4 x i32>* %p0, i32 21
+ %p22 = getelementptr <4 x i32>* %p0, i32 22
+ %p23 = getelementptr <4 x i32>* %p0, i32 23
+ %p24 = getelementptr <4 x i32>* %p0, i32 24
+ %p25 = getelementptr <4 x i32>* %p0, i32 25
+ %p26 = getelementptr <4 x i32>* %p0, i32 26
+ %p27 = getelementptr <4 x i32>* %p0, i32 27
+ %p28 = getelementptr <4 x i32>* %p0, i32 28
+ %p29 = getelementptr <4 x i32>* %p0, i32 29
+ %p30 = getelementptr <4 x i32>* %p0, i32 30
+ %p31 = getelementptr <4 x i32>* %p0, i32 31
+ %p32 = getelementptr <4 x i32>* %p0, i32 32
+ %p33 = getelementptr <4 x i32>* %p0, i32 33
+ %0 = load <4 x i32>* %p0, align 16
+ %1 = load <4 x i32>* %p1, align 16
+ %2 = load <4 x i32>* %p2, align 16
+ %3 = load <4 x i32>* %p3, align 16
+ %4 = load <4 x i32>* %p4, align 16
+ %5 = load <4 x i32>* %p5, align 16
+ %6 = load <4 x i32>* %p6, align 16
+ %7 = load <4 x i32>* %p7, align 16
+ %8 = load <4 x i32>* %p8, align 16
+ %9 = load <4 x i32>* %p9, align 16
+ %10 = load <4 x i32>* %p10, align 16
+ %11 = load <4 x i32>* %p11, align 16
+ %12 = load <4 x i32>* %p12, align 16
+ %13 = load <4 x i32>* %p13, align 16
+ %14 = load <4 x i32>* %p14, align 16
+ %15 = load <4 x i32>* %p15, align 16
+ %16 = load <4 x i32>* %p16, align 16
+ %17 = load <4 x i32>* %p17, align 16
+ %18 = load <4 x i32>* %p18, align 16
+ %19 = load <4 x i32>* %p19, align 16
+ %20 = load <4 x i32>* %p20, align 16
+ %21 = load <4 x i32>* %p21, align 16
+ %22 = load <4 x i32>* %p22, align 16
+ %23 = load <4 x i32>* %p23, align 16
+ %24 = load <4 x i32>* %p24, align 16
+ %25 = load <4 x i32>* %p25, align 16
+ %26 = load <4 x i32>* %p26, align 16
+ %27 = load <4 x i32>* %p27, align 16
+ %28 = load <4 x i32>* %p28, align 16
+ %29 = load <4 x i32>* %p29, align 16
+ %30 = load <4 x i32>* %p30, align 16
+ %31 = load <4 x i32>* %p31, align 16
+ %32 = load <4 x i32>* %p32, align 16
+ %33 = load <4 x i32>* %p33, align 16
+ %r1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
+ %r2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r1, <4 x i32> %2)
+ %r3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r2, <4 x i32> %3)
+ %r4 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r3, <4 x i32> %4)
+ %r5 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r4, <4 x i32> %5)
+ %r6 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r5, <4 x i32> %6)
+ %r7 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r6, <4 x i32> %7)
+ %r8 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r7, <4 x i32> %8)
+ %r9 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r8, <4 x i32> %9)
+ %r10 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r9, <4 x i32> %10)
+ %r11 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r10, <4 x i32> %11)
+ %r12 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r11, <4 x i32> %12)
+ %r13 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r12, <4 x i32> %13)
+ %r14 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r13, <4 x i32> %14)
+ %r15 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r14, <4 x i32> %15)
+ %r16 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r15, <4 x i32> %16)
+ %r17 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r16, <4 x i32> %17)
+ %r18 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r17, <4 x i32> %18)
+ %r19 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r18, <4 x i32> %19)
+ %r20 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r19, <4 x i32> %20)
+ %r21 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r20, <4 x i32> %21)
+ %r22 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r21, <4 x i32> %22)
+ %r23 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r22, <4 x i32> %23)
+ %r24 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r23, <4 x i32> %24)
+ %r25 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r24, <4 x i32> %25)
+ %r26 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r25, <4 x i32> %26)
+ %r27 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r26, <4 x i32> %27)
+ %r28 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r27, <4 x i32> %28)
+ %r29 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r28, <4 x i32> %29)
+ %r30 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r29, <4 x i32> %30)
+ %r31 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r30, <4 x i32> %31)
+ %r32 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r31, <4 x i32> %32)
+ %r33 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r32, <4 x i32> %33)
+ %rx1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r33, <4 x i32> %1)
+ %rx2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx1, <4 x i32> %2)
+ %rx3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx2, <4 x i32> %3)
+ %rx4 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx3, <4 x i32> %4)
+ %rx5 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx4, <4 x i32> %5)
+ %rx6 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx5, <4 x i32> %6)
+ %rx7 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx6, <4 x i32> %7)
+ %rx8 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx7, <4 x i32> %8)
+ %rx9 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx8, <4 x i32> %9)
+ %rx10 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx9, <4 x i32> %10)
+ %rx11 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx10, <4 x i32> %11)
+ %rx12 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx11, <4 x i32> %12)
+ %rx13 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx12, <4 x i32> %13)
+ %rx14 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx13, <4 x i32> %14)
+ %rx15 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx14, <4 x i32> %15)
+ %rx16 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx15, <4 x i32> %16)
+ %rx17 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx16, <4 x i32> %17)
+ %rx18 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx17, <4 x i32> %18)
+ %rx19 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx18, <4 x i32> %19)
+ %rx20 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx19, <4 x i32> %20)
+ %rx21 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx20, <4 x i32> %21)
+ %rx22 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx21, <4 x i32> %22)
+ %rx23 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx22, <4 x i32> %23)
+ %rx24 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx23, <4 x i32> %24)
+ %rx25 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx24, <4 x i32> %25)
+ %rx26 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx25, <4 x i32> %26)
+ %rx27 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx26, <4 x i32> %27)
+ %rx28 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx27, <4 x i32> %28)
+ %rx29 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx28, <4 x i32> %29)
+ %rx30 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx29, <4 x i32> %30)
+ %rx31 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx30, <4 x i32> %31)
+ %rx32 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx31, <4 x i32> %32)
+ %rx33 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx32, <4 x i32> %33)
+ %res = call i32 @llvm.mips.copy.s.w(<4 x i32> %rx33, i32 0)
+ ret i32 %res
+}
+
+declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind
+declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: test_i32:
+; CHECK: st.w {{.*}} Spill
+; CHECK: st.w {{.*}} Spill
+; CHECK: ld.w {{.*}} Reload
+; CHECK: ld.w {{.*}} Reload
+; CHECK: .size
+
+define i32 @test_i64(<2 x i64>* %p0, <2 x i64>* %q1) nounwind {
+entry:
+ %p1 = getelementptr <2 x i64>* %p0, i32 1
+ %p2 = getelementptr <2 x i64>* %p0, i32 2
+ %p3 = getelementptr <2 x i64>* %p0, i32 3
+ %p4 = getelementptr <2 x i64>* %p0, i32 4
+ %p5 = getelementptr <2 x i64>* %p0, i32 5
+ %p6 = getelementptr <2 x i64>* %p0, i32 6
+ %p7 = getelementptr <2 x i64>* %p0, i32 7
+ %p8 = getelementptr <2 x i64>* %p0, i32 8
+ %p9 = getelementptr <2 x i64>* %p0, i32 9
+ %p10 = getelementptr <2 x i64>* %p0, i32 10
+ %p11 = getelementptr <2 x i64>* %p0, i32 11
+ %p12 = getelementptr <2 x i64>* %p0, i32 12
+ %p13 = getelementptr <2 x i64>* %p0, i32 13
+ %p14 = getelementptr <2 x i64>* %p0, i32 14
+ %p15 = getelementptr <2 x i64>* %p0, i32 15
+ %p16 = getelementptr <2 x i64>* %p0, i32 16
+ %p17 = getelementptr <2 x i64>* %p0, i32 17
+ %p18 = getelementptr <2 x i64>* %p0, i32 18
+ %p19 = getelementptr <2 x i64>* %p0, i32 19
+ %p20 = getelementptr <2 x i64>* %p0, i32 20
+ %p21 = getelementptr <2 x i64>* %p0, i32 21
+ %p22 = getelementptr <2 x i64>* %p0, i32 22
+ %p23 = getelementptr <2 x i64>* %p0, i32 23
+ %p24 = getelementptr <2 x i64>* %p0, i32 24
+ %p25 = getelementptr <2 x i64>* %p0, i32 25
+ %p26 = getelementptr <2 x i64>* %p0, i32 26
+ %p27 = getelementptr <2 x i64>* %p0, i32 27
+ %p28 = getelementptr <2 x i64>* %p0, i32 28
+ %p29 = getelementptr <2 x i64>* %p0, i32 29
+ %p30 = getelementptr <2 x i64>* %p0, i32 30
+ %p31 = getelementptr <2 x i64>* %p0, i32 31
+ %p32 = getelementptr <2 x i64>* %p0, i32 32
+ %p33 = getelementptr <2 x i64>* %p0, i32 33
+ %0 = load <2 x i64>* %p0, align 16
+ %1 = load <2 x i64>* %p1, align 16
+ %2 = load <2 x i64>* %p2, align 16
+ %3 = load <2 x i64>* %p3, align 16
+ %4 = load <2 x i64>* %p4, align 16
+ %5 = load <2 x i64>* %p5, align 16
+ %6 = load <2 x i64>* %p6, align 16
+ %7 = load <2 x i64>* %p7, align 16
+ %8 = load <2 x i64>* %p8, align 16
+ %9 = load <2 x i64>* %p9, align 16
+ %10 = load <2 x i64>* %p10, align 16
+ %11 = load <2 x i64>* %p11, align 16
+ %12 = load <2 x i64>* %p12, align 16
+ %13 = load <2 x i64>* %p13, align 16
+ %14 = load <2 x i64>* %p14, align 16
+ %15 = load <2 x i64>* %p15, align 16
+ %16 = load <2 x i64>* %p16, align 16
+ %17 = load <2 x i64>* %p17, align 16
+ %18 = load <2 x i64>* %p18, align 16
+ %19 = load <2 x i64>* %p19, align 16
+ %20 = load <2 x i64>* %p20, align 16
+ %21 = load <2 x i64>* %p21, align 16
+ %22 = load <2 x i64>* %p22, align 16
+ %23 = load <2 x i64>* %p23, align 16
+ %24 = load <2 x i64>* %p24, align 16
+ %25 = load <2 x i64>* %p25, align 16
+ %26 = load <2 x i64>* %p26, align 16
+ %27 = load <2 x i64>* %p27, align 16
+ %28 = load <2 x i64>* %p28, align 16
+ %29 = load <2 x i64>* %p29, align 16
+ %30 = load <2 x i64>* %p30, align 16
+ %31 = load <2 x i64>* %p31, align 16
+ %32 = load <2 x i64>* %p32, align 16
+ %33 = load <2 x i64>* %p33, align 16
+ %r1 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
+ %r2 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r1, <2 x i64> %2)
+ %r3 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r2, <2 x i64> %3)
+ %r4 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r3, <2 x i64> %4)
+ %r5 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r4, <2 x i64> %5)
+ %r6 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r5, <2 x i64> %6)
+ %r7 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r6, <2 x i64> %7)
+ %r8 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r7, <2 x i64> %8)
+ %r9 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r8, <2 x i64> %9)
+ %r10 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r9, <2 x i64> %10)
+ %r11 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r10, <2 x i64> %11)
+ %r12 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r11, <2 x i64> %12)
+ %r13 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r12, <2 x i64> %13)
+ %r14 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r13, <2 x i64> %14)
+ %r15 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r14, <2 x i64> %15)
+ %r16 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r15, <2 x i64> %16)
+ %r17 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r16, <2 x i64> %17)
+ %r18 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r17, <2 x i64> %18)
+ %r19 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r18, <2 x i64> %19)
+ %r20 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r19, <2 x i64> %20)
+ %r21 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r20, <2 x i64> %21)
+ %r22 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r21, <2 x i64> %22)
+ %r23 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r22, <2 x i64> %23)
+ %r24 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r23, <2 x i64> %24)
+ %r25 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r24, <2 x i64> %25)
+ %r26 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r25, <2 x i64> %26)
+ %r27 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r26, <2 x i64> %27)
+ %r28 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r27, <2 x i64> %28)
+ %r29 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r28, <2 x i64> %29)
+ %r30 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r29, <2 x i64> %30)
+ %r31 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r30, <2 x i64> %31)
+ %r32 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r31, <2 x i64> %32)
+ %r33 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r32, <2 x i64> %33)
+ %rx1 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r33, <2 x i64> %1)
+ %rx2 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx1, <2 x i64> %2)
+ %rx3 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx2, <2 x i64> %3)
+ %rx4 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx3, <2 x i64> %4)
+ %rx5 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx4, <2 x i64> %5)
+ %rx6 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx5, <2 x i64> %6)
+ %rx7 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx6, <2 x i64> %7)
+ %rx8 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx7, <2 x i64> %8)
+ %rx9 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx8, <2 x i64> %9)
+ %rx10 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx9, <2 x i64> %10)
+ %rx11 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx10, <2 x i64> %11)
+ %rx12 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx11, <2 x i64> %12)
+ %rx13 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx12, <2 x i64> %13)
+ %rx14 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx13, <2 x i64> %14)
+ %rx15 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx14, <2 x i64> %15)
+ %rx16 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx15, <2 x i64> %16)
+ %rx17 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx16, <2 x i64> %17)
+ %rx18 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx17, <2 x i64> %18)
+ %rx19 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx18, <2 x i64> %19)
+ %rx20 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx19, <2 x i64> %20)
+ %rx21 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx20, <2 x i64> %21)
+ %rx22 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx21, <2 x i64> %22)
+ %rx23 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx22, <2 x i64> %23)
+ %rx24 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx23, <2 x i64> %24)
+ %rx25 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx24, <2 x i64> %25)
+ %rx26 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx25, <2 x i64> %26)
+ %rx27 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx26, <2 x i64> %27)
+ %rx28 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx27, <2 x i64> %28)
+ %rx29 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx28, <2 x i64> %29)
+ %rx30 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx29, <2 x i64> %30)
+ %rx31 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx30, <2 x i64> %31)
+ %rx32 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx31, <2 x i64> %32)
+ %rx33 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx32, <2 x i64> %33)
+ %res1 = bitcast <2 x i64> %rx33 to <4 x i32>
+ %res = call i32 @llvm.mips.copy.s.w(<4 x i32> %res1, i32 0)
+ ret i32 %res
+}
+
+declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: test_i64:
+; CHECK: st.d {{.*}} Spill
+; CHECK: st.d {{.*}} Spill
+; CHECK: ld.d {{.*}} Reload
+; CHECK: ld.d {{.*}} Reload
+; CHECK: .size
diff --git a/test/CodeGen/Mips/msa/vec.ll b/test/CodeGen/Mips/msa/vec.ll
new file mode 100644
index 0000000..5bddf5a
--- /dev/null
+++ b/test/CodeGen/Mips/msa/vec.ll
@@ -0,0 +1,946 @@
+; Test the MSA intrinsics that are encoded with the VEC instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=ANYENDIAN %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=ANYENDIAN %s
+
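+; All of the intrinsics tested here (and.v, bmnz.v, bmz.v, bsel.v, nor.v,
+; or.v, xor.v) are bitwise operations on the whole 128-bit register, so the
+; intrinsic tests for every element width go through <16 x i8> bitcasts and
+; are expected to load and store with ld.b/st.b regardless of the nominal
+; element type.
+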
+@llvm_mips_and_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_and_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_and_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_and_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %2 = bitcast <16 x i8> %0 to <16 x i8>
+ %3 = bitcast <16 x i8> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <16 x i8>
+ store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_b_test
+;
+@llvm_mips_and_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_and_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_and_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_and_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %2 = bitcast <8 x i16> %0 to <16 x i8>
+ %3 = bitcast <8 x i16> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <8 x i16>
+ store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_h_test
+;
+@llvm_mips_and_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_and_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_and_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_and_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %2 = bitcast <4 x i32> %0 to <16 x i8>
+ %3 = bitcast <4 x i32> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <4 x i32>
+ store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_w_test
+;
+@llvm_mips_and_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_and_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_and_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_and_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %2 = bitcast <2 x i64> %0 to <16 x i8>
+ %3 = bitcast <2 x i64> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <2 x i64>
+ store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_d_test
+;
+define void @and_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %2 = and <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: and_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size and_v_b_test
+;
+define void @and_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %2 = and <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: and_v_h_test:
+; ANYENDIAN: ld.h
+; ANYENDIAN: ld.h
+; ANYENDIAN: and.v
+; ANYENDIAN: st.h
+; ANYENDIAN: .size and_v_h_test
+;
+
+define void @and_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %2 = and <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: and_v_w_test:
+; ANYENDIAN: ld.w
+; ANYENDIAN: ld.w
+; ANYENDIAN: and.v
+; ANYENDIAN: st.w
+; ANYENDIAN: .size and_v_w_test
+;
+
+define void @and_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %2 = and <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: and_v_d_test:
+; ANYENDIAN: ld.d
+; ANYENDIAN: ld.d
+; ANYENDIAN: and.v
+; ANYENDIAN: st.d
+; ANYENDIAN: .size and_v_d_test
+;
+@llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bmnz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmnz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bmnz_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
+ %3 = bitcast <16 x i8> %0 to <16 x i8>
+ %4 = bitcast <16 x i8> %1 to <16 x i8>
+ %5 = bitcast <16 x i8> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <16 x i8>
+ store <16 x i8> %7, <16 x i8>* @llvm_mips_bmnz_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_b_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R4]], 0(
+; ANYENDIAN: .size llvm_mips_bmnz_v_b_test
+
+@llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bmnz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bmnz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bmnz_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
+ %3 = bitcast <8 x i16> %0 to <16 x i8>
+ %4 = bitcast <8 x i16> %1 to <16 x i8>
+ %5 = bitcast <8 x i16> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <8 x i16>
+ store <8 x i16> %7, <8 x i16>* @llvm_mips_bmnz_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_h_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R4]], 0(
+; ANYENDIAN: .size llvm_mips_bmnz_v_h_test
+
+@llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bmnz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bmnz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bmnz_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
+ %3 = bitcast <4 x i32> %0 to <16 x i8>
+ %4 = bitcast <4 x i32> %1 to <16 x i8>
+ %5 = bitcast <4 x i32> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <4 x i32>
+ store <4 x i32> %7, <4 x i32>* @llvm_mips_bmnz_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_w_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R4]], 0(
+; ANYENDIAN: .size llvm_mips_bmnz_v_w_test
+
+@llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bmnz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bmnz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bmnz_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
+ %3 = bitcast <2 x i64> %0 to <16 x i8>
+ %4 = bitcast <2 x i64> %1 to <16 x i8>
+ %5 = bitcast <2 x i64> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <2 x i64>
+ store <2 x i64> %7, <2 x i64>* @llvm_mips_bmnz_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_d_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R4]], 0(
+; ANYENDIAN: .size llvm_mips_bmnz_v_d_test
+
+@llvm_mips_bmz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bmz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bmz_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG3
+ %3 = bitcast <16 x i8> %0 to <16 x i8>
+ %4 = bitcast <16 x i8> %1 to <16 x i8>
+ %5 = bitcast <16 x i8> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <16 x i8>
+ store <16 x i8> %7, <16 x i8>* @llvm_mips_bmz_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_b_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bmz.v with ws and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
+; ANYENDIAN: .size llvm_mips_bmz_v_b_test
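+; A short derivation, for reference (the formulas are the MSA-specified
+; semantics, not something this file asserts): bmz.v wd, ws, wt computes
+; (ws & ~wt) | (wd & wt), while bmnz.v wd, ws, wt computes
+; (ws & wt) | (wd & ~wt). Exchanging the roles of ws and wd in bmz.v yields
+; exactly bmnz.v, which is why the expected output above uses bmnz.v with
+; [[R5]] (ARG2) as the destination operand.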
+
+@llvm_mips_bmz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bmz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bmz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bmz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bmz_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG3
+ %3 = bitcast <8 x i16> %0 to <16 x i8>
+ %4 = bitcast <8 x i16> %1 to <16 x i8>
+ %5 = bitcast <8 x i16> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <8 x i16>
+ store <8 x i16> %7, <8 x i16>* @llvm_mips_bmz_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_h_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bmz.v with ws and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
+; ANYENDIAN: .size llvm_mips_bmz_v_h_test
+
+@llvm_mips_bmz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bmz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bmz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bmz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bmz_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG3
+ %3 = bitcast <4 x i32> %0 to <16 x i8>
+ %4 = bitcast <4 x i32> %1 to <16 x i8>
+ %5 = bitcast <4 x i32> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <4 x i32>
+ store <4 x i32> %7, <4 x i32>* @llvm_mips_bmz_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_w_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bmz.v with ws and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
+; ANYENDIAN: .size llvm_mips_bmz_v_w_test
+
+@llvm_mips_bmz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bmz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bmz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bmz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bmz_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG3
+ %3 = bitcast <2 x i64> %0 to <16 x i8>
+ %4 = bitcast <2 x i64> %1 to <16 x i8>
+ %5 = bitcast <2 x i64> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <2 x i64>
+ store <2 x i64> %7, <2 x i64>* @llvm_mips_bmz_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_d_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bmz.v with ws and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
+; ANYENDIAN: .size llvm_mips_bmz_v_d_test
+
+@llvm_mips_bsel_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bsel_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bsel_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bsel_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bsel_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG3
+ %3 = bitcast <16 x i8> %0 to <16 x i8>
+ %4 = bitcast <16 x i8> %1 to <16 x i8>
+ %5 = bitcast <16 x i8> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <16 x i8>
+ store <16 x i8> %7, <16 x i8>* @llvm_mips_bsel_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_b_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bsel.v with wt and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R6]], 0(
+; ANYENDIAN: .size llvm_mips_bsel_v_b_test
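+; For reference, derived purely from the bmnz.v semantics and the register
+; mapping checked above: bmnz.v [[R6]], [[R5]], [[R4]] computes
+; (ARG2 & ARG1) | (ARG3 & ~ARG1), i.e. bits come from ARG2 where the ARG1
+; bit is 1 and from ARG3 where it is 0, with ARG3's register reused as the
+; destination.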
+
+@llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bsel_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bsel_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bsel_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bsel_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG3
+ %3 = bitcast <8 x i16> %0 to <16 x i8>
+ %4 = bitcast <8 x i16> %1 to <16 x i8>
+ %5 = bitcast <8 x i16> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <8 x i16>
+ store <8 x i16> %7, <8 x i16>* @llvm_mips_bsel_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_h_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bsel.v with wt and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R6]], 0(
+; ANYENDIAN: .size llvm_mips_bsel_v_h_test
+
+@llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bsel_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bsel_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bsel_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bsel_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG3
+ %3 = bitcast <4 x i32> %0 to <16 x i8>
+ %4 = bitcast <4 x i32> %1 to <16 x i8>
+ %5 = bitcast <4 x i32> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <4 x i32>
+ store <4 x i32> %7, <4 x i32>* @llvm_mips_bsel_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_w_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bsel.v with wt and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R6]], 0(
+; ANYENDIAN: .size llvm_mips_bsel_v_w_test
+
+@llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bsel_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bsel_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bsel_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bsel_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG3
+ %3 = bitcast <2 x i64> %0 to <16 x i8>
+ %4 = bitcast <2 x i64> %1 to <16 x i8>
+ %5 = bitcast <2 x i64> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <2 x i64>
+ store <2 x i64> %7, <2 x i64>* @llvm_mips_bsel_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_d_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bsel.v with wt and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R6]], 0(
+; ANYENDIAN: .size llvm_mips_bsel_v_d_test
+
+@llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_nor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nor_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
+ %2 = bitcast <16 x i8> %0 to <16 x i8>
+ %3 = bitcast <16 x i8> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <16 x i8>
+ store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_b_test
+;
+@llvm_mips_nor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_nor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_nor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_nor_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
+ %2 = bitcast <8 x i16> %0 to <16 x i8>
+ %3 = bitcast <8 x i16> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <8 x i16>
+ store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_h_test
+;
+@llvm_mips_nor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_nor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_nor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_nor_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
+ %2 = bitcast <4 x i32> %0 to <16 x i8>
+ %3 = bitcast <4 x i32> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <4 x i32>
+ store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_w_test
+;
+@llvm_mips_nor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_nor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_nor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_nor_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
+ %2 = bitcast <2 x i64> %0 to <16 x i8>
+ %3 = bitcast <2 x i64> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <2 x i64>
+ store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_d_test
+;
+@llvm_mips_or_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_or_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_or_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_or_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %2 = bitcast <16 x i8> %0 to <16 x i8>
+ %3 = bitcast <16 x i8> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <16 x i8>
+ store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_b_test
+;
+@llvm_mips_or_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_or_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_or_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_or_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %2 = bitcast <8 x i16> %0 to <16 x i8>
+ %3 = bitcast <8 x i16> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <8 x i16>
+ store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_h_test
+;
+@llvm_mips_or_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_or_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_or_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_or_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %2 = bitcast <4 x i32> %0 to <16 x i8>
+ %3 = bitcast <4 x i32> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <4 x i32>
+ store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_w_test
+;
+@llvm_mips_or_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_or_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_or_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_or_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %2 = bitcast <2 x i64> %0 to <16 x i8>
+ %3 = bitcast <2 x i64> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <2 x i64>
+ store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_d_test
+;
+define void @or_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %2 = or <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: or_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size or_v_b_test
+;
+define void @or_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %2 = or <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: or_v_h_test:
+; ANYENDIAN: ld.h
+; ANYENDIAN: ld.h
+; ANYENDIAN: or.v
+; ANYENDIAN: st.h
+; ANYENDIAN: .size or_v_h_test
+;
+
+define void @or_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %2 = or <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: or_v_w_test:
+; ANYENDIAN: ld.w
+; ANYENDIAN: ld.w
+; ANYENDIAN: or.v
+; ANYENDIAN: st.w
+; ANYENDIAN: .size or_v_w_test
+;
+
+define void @or_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %2 = or <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: or_v_d_test:
+; ANYENDIAN: ld.d
+; ANYENDIAN: ld.d
+; ANYENDIAN: or.v
+; ANYENDIAN: st.d
+; ANYENDIAN: .size or_v_d_test
+;
+@llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_xor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_xor_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %2 = bitcast <16 x i8> %0 to <16 x i8>
+ %3 = bitcast <16 x i8> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <16 x i8>
+ store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_b_test
+;
+@llvm_mips_xor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_xor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_xor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_xor_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %2 = bitcast <8 x i16> %0 to <16 x i8>
+ %3 = bitcast <8 x i16> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <8 x i16>
+ store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_h_test
+;
+@llvm_mips_xor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_xor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_xor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_xor_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %2 = bitcast <4 x i32> %0 to <16 x i8>
+ %3 = bitcast <4 x i32> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <4 x i32>
+ store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_w_test
+;
+@llvm_mips_xor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_xor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_xor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_xor_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %2 = bitcast <2 x i64> %0 to <16 x i8>
+ %3 = bitcast <2 x i64> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <2 x i64>
+ store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_d_test
+;
+define void @xor_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %2 = xor <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: xor_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size xor_v_b_test
+;
+define void @xor_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %2 = xor <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: xor_v_h_test:
+; ANYENDIAN: ld.h
+; ANYENDIAN: ld.h
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.h
+; ANYENDIAN: .size xor_v_h_test
+;
+
+define void @xor_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %2 = xor <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: xor_v_w_test:
+; ANYENDIAN: ld.w
+; ANYENDIAN: ld.w
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.w
+; ANYENDIAN: .size xor_v_w_test
+;
+
+define void @xor_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %2 = xor <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: xor_v_d_test:
+; ANYENDIAN: ld.d
+; ANYENDIAN: ld.d
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.d
+; ANYENDIAN: .size xor_v_d_test
+;
+declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind
diff --git a/test/CodeGen/Mips/msa/vecs10.ll b/test/CodeGen/Mips/msa/vecs10.ll
new file mode 100644
index 0000000..e22e075
--- /dev/null
+++ b/test/CodeGen/Mips/msa/vecs10.ll
@@ -0,0 +1,47 @@
+; Test the MSA intrinsics that are encoded with the VECS10 instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_bnz_v_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+
+define i32 @llvm_mips_bnz_v_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bnz_v_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.v(<16 x i8> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.v(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_bnz_v_test:
+; CHECK-DAG: ld.b [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.v [[R0]]
+; CHECK: .size llvm_mips_bnz_v_test
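+; bnz.v branches when any bit of the source register is non-zero, so the
+; intrinsic result is expected to be 1 unless the vector is entirely zero;
+; bz.v, tested below, checks the complementary all-zero condition.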
+
+@llvm_mips_bz_v_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+
+define i32 @llvm_mips_bz_v_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bz_v_ARG1
+ %1 = tail call i32 @llvm.mips.bz.v(<16 x i8> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bz.v(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_bz_v_test:
+; CHECK-DAG: ld.b [[R0:\$w[0-9]+]]
+; CHECK-DAG: bz.v [[R0]]
+; CHECK: .size llvm_mips_bz_v_test
+;
diff --git a/test/CodeGen/Mips/nomips16.ll b/test/CodeGen/Mips/nomips16.ll
new file mode 100644
index 0000000..bf7c667
--- /dev/null
+++ b/test/CodeGen/Mips/nomips16.ll
@@ -0,0 +1,38 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s
+
+@x = global float 0.000000e+00, align 4
+@.str = private unnamed_addr constant [20 x i8] c"in main: mips16 %f\0A\00", align 1
+
+; Function Attrs: nounwind
+define void @foo() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %conv = fpext float %0 to double
+ %add = fadd double %conv, 1.500000e+00
+ %conv1 = fptrunc double %add to float
+ store float %conv1, float* @x, align 4
+ ret void
+}
+; CHECK: .ent foo
+; CHECK: jal __mips16_extendsfdf2
+; CHECK: .end foo
+
+; Function Attrs: nounwind
+define void @nofoo() #1 {
+entry:
+ %0 = load float* @x, align 4
+ %conv = fpext float %0 to double
+ %add = fadd double %conv, 3.900000e+00
+ %conv1 = fptrunc double %add to float
+ store float %conv1, float* @x, align 4
+ ret void
+}
+
+; CHECK: .ent nofoo
+; CHECK: cvt.d.s $f{{.+}}, $f{{.+}}
+; CHECK: .end nofoo
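+;
+; foo carries the "mips16" attribute, so the float-to-double extension is
+; lowered through the mips16 hard-float helper __mips16_extendsfdf2, while
+; nofoo is marked "nomips16" and may use the cvt.d.s FPU instruction
+; directly.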
+
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
diff --git a/test/CodeGen/Mips/o32_cc.ll b/test/CodeGen/Mips/o32_cc.ll
index 70b66ef..08e5aab 100644
--- a/test/CodeGen/Mips/o32_cc.ll
+++ b/test/CodeGen/Mips/o32_cc.ll
@@ -1,11 +1,12 @@
-; RUN: llc -march=mips < %s | FileCheck %s
-
-; FIXME: Disabled because it unpredictably fails on certain platforms.
-; REQUIRES: disabled
+; RUN: llc -march=mipsel < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel < %s | FileCheck -check-prefix=FP32EL %s
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck -check-prefix=FP64EL %s
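+
+; Under O32 the first two floating-point arguments are passed in $f12 and
+; $f14, integer arguments in $4-$7, and further arguments on the stack
+; starting at 16($sp).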
; $f12, $f14
-; CHECK: ldc1 $f12, %lo
-; CHECK: ldc1 $f14, %lo
+; CHECK-LABEL: testlowercall0:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: ldc1 $f14, %lo
define void @testlowercall0() nounwind {
entry:
tail call void @f0(double 5.000000e+00, double 6.000000e+00) nounwind
@@ -15,8 +16,9 @@ entry:
declare void @f0(double, double)
; $f12, $f14
-; CHECK: lwc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
+; CHECK-LABEL: testlowercall1:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
define void @testlowercall1() nounwind {
entry:
tail call void @f1(float 8.000000e+00, float 9.000000e+00) nounwind
@@ -26,8 +28,9 @@ entry:
declare void @f1(float, float)
; $f12, $f14
-; CHECK: lwc1 $f12, %lo
-; CHECK: ldc1 $f14, %lo
+; CHECK-LABEL: testlowercall2:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: ldc1 $f14, %lo
define void @testlowercall2() nounwind {
entry:
tail call void @f2(float 8.000000e+00, double 6.000000e+00) nounwind
@@ -37,8 +40,9 @@ entry:
declare void @f2(float, double)
; $f12, $f14
-; CHECK: ldc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
+; CHECK-LABEL: testlowercall3:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
define void @testlowercall3() nounwind {
entry:
tail call void @f3(double 5.000000e+00, float 9.000000e+00) nounwind
@@ -48,10 +52,11 @@ entry:
declare void @f3(double, float)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 12
-; CHECK: addiu $5, $zero, 13
-; CHECK: addiu $6, $zero, 14
-; CHECK: addiu $7, $zero, 15
+; CHECK-LABEL: testlowercall4:
+; CHECK-DAG: addiu $4, $zero, 12
+; CHECK-DAG: addiu $5, $zero, 13
+; CHECK-DAG: addiu $6, $zero, 14
+; CHECK-DAG: addiu $7, $zero, 15
define void @testlowercall4() nounwind {
entry:
tail call void @f4(i32 12, i32 13, i32 14, i32 15) nounwind
@@ -61,10 +66,11 @@ entry:
declare void @f4(i32, i32, i32, i32)
; $f12, $6, stack
-; CHECK: sw
-; CHECK: sw
-; CHECK: ldc1 $f12, %lo
-; CHECK: addiu $6, $zero, 23
+; CHECK-LABEL: testlowercall5:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: addiu $6, $zero, 23
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall5() nounwind {
entry:
tail call void @f5(double 1.500000e+01, i32 23, double 1.700000e+01) nounwind
@@ -74,9 +80,10 @@ entry:
declare void @f5(double, i32, double)
; $f12, $6, $7
-; CHECK: ldc1 $f12, %lo
-; CHECK: addiu $6, $zero, 33
-; CHECK: addiu $7, $zero, 24
+; CHECK-LABEL: testlowercall6:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: addiu $6, $zero, 33
+; CHECK-DAG: addiu $7, $zero, 24
define void @testlowercall6() nounwind {
entry:
tail call void @f6(double 2.500000e+01, i32 33, i32 24) nounwind
@@ -86,9 +93,10 @@ entry:
declare void @f6(double, i32, i32)
; $f12, $5, $6
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 43
-; CHECK: addiu $6, $zero, 34
+; CHECK-LABEL: testlowercall7:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 43
+; CHECK-DAG: addiu $6, $zero, 34
define void @testlowercall7() nounwind {
entry:
tail call void @f7(float 1.800000e+01, i32 43, i32 34) nounwind
@@ -98,11 +106,12 @@ entry:
declare void @f7(float, i32, i32)
; $4, $5, $6, stack
-; CHECK: sw
-; CHECK: sw
-; CHECK: addiu $4, $zero, 22
-; CHECK: addiu $5, $zero, 53
-; CHECK: addiu $6, $zero, 44
+; CHECK-LABEL: testlowercall8:
+; CHECK-DAG: addiu $4, $zero, 22
+; CHECK-DAG: addiu $5, $zero, 53
+; CHECK-DAG: addiu $6, $zero, 44
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall8() nounwind {
entry:
tail call void @f8(i32 22, i32 53, i32 44, double 4.000000e+00) nounwind
@@ -112,10 +121,11 @@ entry:
declare void @f8(i32, i32, i32, double)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 32
-; CHECK: addiu $5, $zero, 63
-; CHECK: addiu $6, $zero, 54
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall9:
+; CHECK-DAG: addiu $4, $zero, 32
+; CHECK-DAG: addiu $5, $zero, 63
+; CHECK-DAG: addiu $6, $zero, 54
+; CHECK-DAG: lui $7, 16688
define void @testlowercall9() nounwind {
entry:
tail call void @f9(i32 32, i32 63, i32 54, float 1.100000e+01) nounwind
@@ -125,10 +135,15 @@ entry:
declare void @f9(i32, i32, i32, float)
; $4, $5, ($6, $7)
-; CHECK: addiu $4, $zero, 42
-; CHECK: addiu $5, $zero, 73
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall10:
+; CHECK-DAG: addiu $4, $zero, 42
+; CHECK-DAG: addiu $5, $zero, 73
+; FP32EL-LABEL: testlowercall10:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall10:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
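+;
+; With 32-bit FPRs a double occupies an even/odd register pair, so both
+; words reach $6/$7 via mfc1; with +fp64 the whole double lives in one
+; 64-bit FPR and the upper word must be read with mfhc1.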
define void @testlowercall10() nounwind {
entry:
tail call void @f10(i32 42, i32 73, double 2.700000e+01) nounwind
@@ -138,9 +153,14 @@ entry:
declare void @f10(i32, i32, double)
; $4, ($6, $7)
-; CHECK: addiu $4, $zero, 52
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall11:
+; CHECK-DAG: addiu $4, $zero, 52
+; FP32EL-LABEL: testlowercall11:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall11:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall11() nounwind {
entry:
tail call void @f11(i32 52, double 1.600000e+01) nounwind
@@ -150,10 +170,11 @@ entry:
declare void @f11(i32, double)
; $f12, $f14, $6, $7
-; CHECK: lwc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
-; CHECK: ori $6
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall12:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
+; CHECK-DAG: lui $6, 16672
+; CHECK-DAG: lui $7, 16808
define void @testlowercall12() nounwind {
entry:
tail call void @f12(float 2.800000e+01, float 1.900000e+01, float 1.000000e+01, float 2.100000e+01) nounwind
@@ -163,10 +184,11 @@ entry:
declare void @f12(float, float, float, float)
; $f12, $5, $6, $7
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 83
-; CHECK: ori $6
-; CHECK: addiu $7, $zero, 25
+; CHECK-LABEL: testlowercall13:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 83
+; CHECK-DAG: lui $6, 16800
+; CHECK-DAG: addiu $7, $zero, 25
define void @testlowercall13() nounwind {
entry:
tail call void @f13(float 3.800000e+01, i32 83, float 2.000000e+01, i32 25) nounwind
@@ -177,9 +199,10 @@ entry:
declare void @f13(float, i32, float, i32)
; $f12, $f14, $7
-; CHECK: ldc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall14:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
+; CHECK-DAG: lui $7, 16880
define void @testlowercall14() nounwind {
entry:
tail call void @f14(double 3.500000e+01, float 2.900000e+01, float 3.000000e+01) nounwind
@@ -189,10 +212,15 @@ entry:
declare void @f14(double, float, float)
; $f12, $f14, ($6, $7)
-; CHECK: lwc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall15:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
+; FP32EL-LABEL: testlowercall15:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall15:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall15() nounwind {
entry:
tail call void @f15(float 4.800000e+01, float 3.900000e+01, double 3.700000e+01) nounwind
@@ -202,10 +230,11 @@ entry:
declare void @f15(float, float, double)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 62
-; CHECK: ori $5
-; CHECK: addiu $6, $zero, 64
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall16:
+; CHECK-DAG: addiu $4, $zero, 62
+; CHECK-DAG: lui $5, 16964
+; CHECK-DAG: addiu $6, $zero, 64
+; CHECK-DAG: lui $7, 16888
define void @testlowercall16() nounwind {
entry:
tail call void @f16(i32 62, float 4.900000e+01, i32 64, float 3.100000e+01) nounwind
@@ -215,10 +244,11 @@ entry:
declare void @f16(i32, float, i32, float)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 72
-; CHECK: ori $5
-; CHECK: addiu $6, $zero, 74
-; CHECK: addiu $7, $zero, 35
+; CHECK-LABEL: testlowercall17:
+; CHECK-DAG: addiu $4, $zero, 72
+; CHECK-DAG: lui $5, 17004
+; CHECK-DAG: addiu $6, $zero, 74
+; CHECK-DAG: addiu $7, $zero, 35
define void @testlowercall17() nounwind {
entry:
tail call void @f17(i32 72, float 5.900000e+01, i32 74, i32 35) nounwind
@@ -228,10 +258,11 @@ entry:
declare void @f17(i32, float, i32, i32)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 82
-; CHECK: addiu $5, $zero, 93
-; CHECK: ori $6
-; CHECK: addiu $7, $zero, 45
+; CHECK-LABEL: testlowercall18:
+; CHECK-DAG: addiu $4, $zero, 82
+; CHECK-DAG: addiu $5, $zero, 93
+; CHECK-DAG: lui $6, 16928
+; CHECK-DAG: addiu $7, $zero, 45
define void @testlowercall18() nounwind {
entry:
tail call void @f18(i32 82, i32 93, float 4.000000e+01, i32 45) nounwind
@@ -242,11 +273,16 @@ declare void @f18(i32, i32, float, i32)
; $4, ($6, $7), stack
-; CHECK: sw
-; CHECK: sw
-; CHECK: addiu $4, $zero, 92
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall20:
+; CHECK-DAG: addiu $4, $zero, 92
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
+; FP32EL-LABEL: testlowercall20:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall20:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall20() nounwind {
entry:
tail call void @f20(i32 92, double 2.600000e+01, double 4.700000e+01) nounwind
@@ -256,8 +292,9 @@ entry:
declare void @f20(i32, double, double)
; $f12, $5
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 103
+; CHECK-LABEL: testlowercall21:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 103
define void @testlowercall21() nounwind {
entry:
tail call void @f21(float 5.800000e+01, i32 103) nounwind
@@ -267,10 +304,15 @@ entry:
declare void @f21(float, i32)
; $f12, $5, ($6, $7)
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 113
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall22:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 113
+; FP32EL-LABEL: testlowercall22:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall22:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall22() nounwind {
entry:
tail call void @f22(float 6.800000e+01, i32 113, double 5.700000e+01) nounwind
@@ -280,8 +322,9 @@ entry:
declare void @f22(float, i32, double)
; $f12, $6
-; CHECK: ldc1 $f12, %lo
-; CHECK: addiu $6, $zero, 123
+; CHECK-LABEL: testlowercall23:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: addiu $6, $zero, 123
define void @testlowercall23() nounwind {
entry:
tail call void @f23(double 4.500000e+01, i32 123) nounwind
@@ -291,10 +334,11 @@ entry:
declare void @f23(double, i32)
; $f12, $6, stack
-; CHECK: sw
-; CHECK: sw
-; CHECK: ldc1 $f12, %lo
-; CHECK: addiu $6, $zero, 133
+; CHECK-LABEL: testlowercall24:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: addiu $6, $zero, 133
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall24() nounwind {
entry:
tail call void @f24(double 5.500000e+01, i32 133, double 6.700000e+01) nounwind
@@ -303,19 +347,19 @@ entry:
declare void @f24(double, i32, double)
-; CHECK: lwc1 $f12, %lo
-; lwc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
-; CHECK: ori $6
-; CHECK: ori $7
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 83
-; CHECK: ori $6
-; CHECK: addiu $7, $zero, 25
-; CHECK: addiu $4, $zero, 82
-; CHECK: addiu $5, $zero, 93
-; CHECK: ori $6
-; CHECK: addiu $7, $zero, 45
+; CHECK-LABEL: testlowercall25:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
+; CHECK-DAG: lui $6
+; CHECK-DAG: lui $7
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 83
+; CHECK-DAG: lui $6
+; CHECK-DAG: addiu $7, $zero, 25
+; CHECK-DAG: addiu $4, $zero, 82
+; CHECK-DAG: addiu $5, $zero, 93
+; CHECK-DAG: lui $6
+; CHECK-DAG: addiu $7, $zero, 45
define void @testlowercall25() nounwind {
entry:
tail call void @f12(float 2.800000e+01, float 1.900000e+01, float 1.000000e+01, float 2.100000e+01) nounwind
diff --git a/test/CodeGen/Mips/o32_cc_byval.ll b/test/CodeGen/Mips/o32_cc_byval.ll
index 0a8f85f..5db47ac 100644
--- a/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/test/CodeGen/Mips/o32_cc_byval.ll
@@ -10,22 +10,23 @@
define void @f1() nounwind {
entry:
-; CHECK: lw $[[R1:[0-9]+]], %got(f1.s1)
-; CHECK: addiu $[[R0:[0-9]+]], $[[R1]], %lo(f1.s1)
-; CHECK: lw $[[R7:[0-9]+]], 12($[[R0]])
-; CHECK: lw $[[R3:[0-9]+]], 16($[[R0]])
-; CHECK: lw $[[R4:[0-9]+]], 20($[[R0]])
-; CHECK: lw $[[R5:[0-9]+]], 24($[[R0]])
-; CHECK: lw $[[R6:[0-9]+]], 28($[[R0]])
-; CHECK: sw $[[R6]], 36($sp)
-; CHECK: sw $[[R5]], 32($sp)
-; CHECK: sw $[[R4]], 28($sp)
-; CHECK: sw $[[R3]], 24($sp)
-; CHECK: sw $[[R7]], 20($sp)
-; CHECK: lw $[[R2:[0-9]+]], 8($[[R0]])
-; CHECK: sw $[[R2]], 16($sp)
-; CHECK: lw $6, %lo(f1.s1)($[[R1]])
-; CHECK: lw $7, 4($[[R0]])
+; CHECK-LABEL: f1:
+; CHECK-DAG: lw $[[R1:[0-9]+]], %got(f1.s1)
+; CHECK-DAG: addiu $[[R0:[0-9]+]], $[[R1]], %lo(f1.s1)
+; CHECK-DAG: lw $[[R7:[0-9]+]], 12($[[R0]])
+; CHECK-DAG: lw $[[R3:[0-9]+]], 16($[[R0]])
+; CHECK-DAG: lw $[[R4:[0-9]+]], 20($[[R0]])
+; CHECK-DAG: lw $[[R5:[0-9]+]], 24($[[R0]])
+; CHECK-DAG: lw $[[R6:[0-9]+]], 28($[[R0]])
+; CHECK-DAG: sw $[[R6]], 36($sp)
+; CHECK-DAG: sw $[[R5]], 32($sp)
+; CHECK-DAG: sw $[[R4]], 28($sp)
+; CHECK-DAG: sw $[[R3]], 24($sp)
+; CHECK-DAG: sw $[[R7]], 20($sp)
+; CHECK-DAG: lw $[[R2:[0-9]+]], 8($[[R0]])
+; CHECK-DAG: sw $[[R2]], 16($sp)
+; CHECK-DAG: lw $6, %lo(f1.s1)($[[R1]])
+; CHECK-DAG: lw $7, 4($[[R0]])
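+;
+; With the float in $f12, the first two words of the byval struct travel
+; in $6 and $7 and the remainder is copied to the outgoing argument area
+; at 16($sp) and up, which is what the stores above assert.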
%agg.tmp10 = alloca %struct.S3, align 4
call void @callee1(float 2.000000e+01, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
call void @callee2(%struct.S2* byval @f1.s2) nounwind
@@ -61,17 +62,17 @@ entry:
; CHECK: mfc1 $6, $f[[F0]]
%i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
- %tmp = load i32* %i2, align 4, !tbaa !0
+ %tmp = load i32* %i2, align 4
%d = getelementptr inbounds %struct.S1* %s1, i32 0, i32 4
- %tmp1 = load double* %d, align 8, !tbaa !3
+ %tmp1 = load double* %d, align 8
%ll = getelementptr inbounds %struct.S1* %s1, i32 0, i32 3
- %tmp2 = load i64* %ll, align 8, !tbaa !4
+ %tmp2 = load i64* %ll, align 8
%i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
- %tmp3 = load i32* %i, align 4, !tbaa !0
+ %tmp3 = load i32* %i, align 4
%s = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1
- %tmp4 = load i16* %s, align 2, !tbaa !5
+ %tmp4 = load i16* %s, align 2
%c = getelementptr inbounds %struct.S1* %s1, i32 0, i32 0
- %tmp5 = load i8* %c, align 1, !tbaa !1
+ %tmp5 = load i8* %c, align 1
tail call void @callee4(i32 %tmp, double %tmp1, i64 %tmp2, i32 %tmp3, i16 signext %tmp4, i8 signext %tmp5, float %f) nounwind
ret void
}
@@ -90,9 +91,9 @@ entry:
; CHECK: sw $[[R0]], 24($sp)
%arrayidx = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 0
- %tmp = load i32* %arrayidx, align 4, !tbaa !0
+ %tmp = load i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 3
- %tmp3 = load i32* %arrayidx2, align 4, !tbaa !0
+ %tmp3 = load i32* %arrayidx2, align 4
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp3, i16 signext 4, i8 signext 5, float 6.000000e+00) nounwind
ret void
}
@@ -110,11 +111,11 @@ entry:
; CHECK: sw $[[R1]], 24($sp)
%i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
- %tmp = load i32* %i, align 4, !tbaa !0
+ %tmp = load i32* %i, align 4
%i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
- %tmp1 = load i32* %i2, align 4, !tbaa !0
+ %tmp1 = load i32* %i2, align 4
%c = getelementptr inbounds %struct.S3* %s3, i32 0, i32 0
- %tmp2 = load i8* %c, align 1, !tbaa !1
+ %tmp2 = load i8* %c, align 1
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp1, i16 signext 4, i8 signext %tmp2, float 6.000000e+00) nounwind
ret void
}
@@ -128,10 +129,3 @@ entry:
}
declare void @f6(%struct.S4* nocapture byval, i64)
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"double", metadata !1}
-!4 = metadata !{metadata !"long long", metadata !1}
-!5 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/Mips/powif64_16.ll b/test/CodeGen/Mips/powif64_16.ll
new file mode 100644
index 0000000..35a7ca9
--- /dev/null
+++ b/test/CodeGen/Mips/powif64_16.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s
+
+declare float @llvm.powi.f32(float %Val, i32 %power)
+declare double @llvm.powi.f64(double %Val, i32 %power)
+
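+; llvm.powi is expanded to a compiler-rt libcall (__powisf2 / __powidf2),
+; not to a call bearing the intrinsic's name, so no mips16 hard-float call
+; stub should be created for it; the CHECK-NOT lines below assert that.
+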
+define float @foo_pow_f32(float %y, i32 %p) {
+ %1 = tail call float @llvm.powi.f32(float %y, i32 %p)
+; CHECK-NOT: .ent __call_stub_fp_llvm.powi.f32
+; CHECK-NOT: {{.*}} jal llvm.powi.f32
+ ret float %1
+}
+
+define double @foo_pow_f64(double %y, i32 %p) {
+ %1 = tail call double @llvm.powi.f64(double %y, i32 %p)
+; CHECK-NOT: .ent __call_stub_fp_llvm.powi.f64
+; CHECK-NOT: {{.*}} jal llvm.powi.f64
+ ret double %1
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind readonly }
+
diff --git a/test/CodeGen/Mips/ra-allocatable.ll b/test/CodeGen/Mips/ra-allocatable.ll
index 7621788..afc5cb0 100644
--- a/test/CodeGen/Mips/ra-allocatable.ll
+++ b/test/CodeGen/Mips/ra-allocatable.ll
@@ -98,191 +98,186 @@ entry:
; CHECK: lw $ra, {{[0-9]+}}($sp) # 4-byte Folded Reload
; CHECK: jr $ra
- %0 = load i32* @a0, align 4, !tbaa !0
- %1 = load i32** @b0, align 4, !tbaa !3
- store i32 %0, i32* %1, align 4, !tbaa !0
- %2 = load i32* @a1, align 4, !tbaa !0
- %3 = load i32** @b1, align 4, !tbaa !3
- store i32 %2, i32* %3, align 4, !tbaa !0
- %4 = load i32* @a2, align 4, !tbaa !0
- %5 = load i32** @b2, align 4, !tbaa !3
- store i32 %4, i32* %5, align 4, !tbaa !0
- %6 = load i32* @a3, align 4, !tbaa !0
- %7 = load i32** @b3, align 4, !tbaa !3
- store i32 %6, i32* %7, align 4, !tbaa !0
- %8 = load i32* @a4, align 4, !tbaa !0
- %9 = load i32** @b4, align 4, !tbaa !3
- store i32 %8, i32* %9, align 4, !tbaa !0
- %10 = load i32* @a5, align 4, !tbaa !0
- %11 = load i32** @b5, align 4, !tbaa !3
- store i32 %10, i32* %11, align 4, !tbaa !0
- %12 = load i32* @a6, align 4, !tbaa !0
- %13 = load i32** @b6, align 4, !tbaa !3
- store i32 %12, i32* %13, align 4, !tbaa !0
- %14 = load i32* @a7, align 4, !tbaa !0
- %15 = load i32** @b7, align 4, !tbaa !3
- store i32 %14, i32* %15, align 4, !tbaa !0
- %16 = load i32* @a8, align 4, !tbaa !0
- %17 = load i32** @b8, align 4, !tbaa !3
- store i32 %16, i32* %17, align 4, !tbaa !0
- %18 = load i32* @a9, align 4, !tbaa !0
- %19 = load i32** @b9, align 4, !tbaa !3
- store i32 %18, i32* %19, align 4, !tbaa !0
- %20 = load i32* @a10, align 4, !tbaa !0
- %21 = load i32** @b10, align 4, !tbaa !3
- store i32 %20, i32* %21, align 4, !tbaa !0
- %22 = load i32* @a11, align 4, !tbaa !0
- %23 = load i32** @b11, align 4, !tbaa !3
- store i32 %22, i32* %23, align 4, !tbaa !0
- %24 = load i32* @a12, align 4, !tbaa !0
- %25 = load i32** @b12, align 4, !tbaa !3
- store i32 %24, i32* %25, align 4, !tbaa !0
- %26 = load i32* @a13, align 4, !tbaa !0
- %27 = load i32** @b13, align 4, !tbaa !3
- store i32 %26, i32* %27, align 4, !tbaa !0
- %28 = load i32* @a14, align 4, !tbaa !0
- %29 = load i32** @b14, align 4, !tbaa !3
- store i32 %28, i32* %29, align 4, !tbaa !0
- %30 = load i32* @a15, align 4, !tbaa !0
- %31 = load i32** @b15, align 4, !tbaa !3
- store i32 %30, i32* %31, align 4, !tbaa !0
- %32 = load i32* @a16, align 4, !tbaa !0
- %33 = load i32** @b16, align 4, !tbaa !3
- store i32 %32, i32* %33, align 4, !tbaa !0
- %34 = load i32* @a17, align 4, !tbaa !0
- %35 = load i32** @b17, align 4, !tbaa !3
- store i32 %34, i32* %35, align 4, !tbaa !0
- %36 = load i32* @a18, align 4, !tbaa !0
- %37 = load i32** @b18, align 4, !tbaa !3
- store i32 %36, i32* %37, align 4, !tbaa !0
- %38 = load i32* @a19, align 4, !tbaa !0
- %39 = load i32** @b19, align 4, !tbaa !3
- store i32 %38, i32* %39, align 4, !tbaa !0
- %40 = load i32* @a20, align 4, !tbaa !0
- %41 = load i32** @b20, align 4, !tbaa !3
- store i32 %40, i32* %41, align 4, !tbaa !0
- %42 = load i32* @a21, align 4, !tbaa !0
- %43 = load i32** @b21, align 4, !tbaa !3
- store i32 %42, i32* %43, align 4, !tbaa !0
- %44 = load i32* @a22, align 4, !tbaa !0
- %45 = load i32** @b22, align 4, !tbaa !3
- store i32 %44, i32* %45, align 4, !tbaa !0
- %46 = load i32* @a23, align 4, !tbaa !0
- %47 = load i32** @b23, align 4, !tbaa !3
- store i32 %46, i32* %47, align 4, !tbaa !0
- %48 = load i32* @a24, align 4, !tbaa !0
- %49 = load i32** @b24, align 4, !tbaa !3
- store i32 %48, i32* %49, align 4, !tbaa !0
- %50 = load i32* @a25, align 4, !tbaa !0
- %51 = load i32** @b25, align 4, !tbaa !3
- store i32 %50, i32* %51, align 4, !tbaa !0
- %52 = load i32* @a26, align 4, !tbaa !0
- %53 = load i32** @b26, align 4, !tbaa !3
- store i32 %52, i32* %53, align 4, !tbaa !0
- %54 = load i32* @a27, align 4, !tbaa !0
- %55 = load i32** @b27, align 4, !tbaa !3
- store i32 %54, i32* %55, align 4, !tbaa !0
- %56 = load i32* @a28, align 4, !tbaa !0
- %57 = load i32** @b28, align 4, !tbaa !3
- store i32 %56, i32* %57, align 4, !tbaa !0
- %58 = load i32* @a29, align 4, !tbaa !0
- %59 = load i32** @b29, align 4, !tbaa !3
- store i32 %58, i32* %59, align 4, !tbaa !0
- %60 = load i32* @a0, align 4, !tbaa !0
- %61 = load i32** @c0, align 4, !tbaa !3
- store i32 %60, i32* %61, align 4, !tbaa !0
- %62 = load i32* @a1, align 4, !tbaa !0
- %63 = load i32** @c1, align 4, !tbaa !3
- store i32 %62, i32* %63, align 4, !tbaa !0
- %64 = load i32* @a2, align 4, !tbaa !0
- %65 = load i32** @c2, align 4, !tbaa !3
- store i32 %64, i32* %65, align 4, !tbaa !0
- %66 = load i32* @a3, align 4, !tbaa !0
- %67 = load i32** @c3, align 4, !tbaa !3
- store i32 %66, i32* %67, align 4, !tbaa !0
- %68 = load i32* @a4, align 4, !tbaa !0
- %69 = load i32** @c4, align 4, !tbaa !3
- store i32 %68, i32* %69, align 4, !tbaa !0
- %70 = load i32* @a5, align 4, !tbaa !0
- %71 = load i32** @c5, align 4, !tbaa !3
- store i32 %70, i32* %71, align 4, !tbaa !0
- %72 = load i32* @a6, align 4, !tbaa !0
- %73 = load i32** @c6, align 4, !tbaa !3
- store i32 %72, i32* %73, align 4, !tbaa !0
- %74 = load i32* @a7, align 4, !tbaa !0
- %75 = load i32** @c7, align 4, !tbaa !3
- store i32 %74, i32* %75, align 4, !tbaa !0
- %76 = load i32* @a8, align 4, !tbaa !0
- %77 = load i32** @c8, align 4, !tbaa !3
- store i32 %76, i32* %77, align 4, !tbaa !0
- %78 = load i32* @a9, align 4, !tbaa !0
- %79 = load i32** @c9, align 4, !tbaa !3
- store i32 %78, i32* %79, align 4, !tbaa !0
- %80 = load i32* @a10, align 4, !tbaa !0
- %81 = load i32** @c10, align 4, !tbaa !3
- store i32 %80, i32* %81, align 4, !tbaa !0
- %82 = load i32* @a11, align 4, !tbaa !0
- %83 = load i32** @c11, align 4, !tbaa !3
- store i32 %82, i32* %83, align 4, !tbaa !0
- %84 = load i32* @a12, align 4, !tbaa !0
- %85 = load i32** @c12, align 4, !tbaa !3
- store i32 %84, i32* %85, align 4, !tbaa !0
- %86 = load i32* @a13, align 4, !tbaa !0
- %87 = load i32** @c13, align 4, !tbaa !3
- store i32 %86, i32* %87, align 4, !tbaa !0
- %88 = load i32* @a14, align 4, !tbaa !0
- %89 = load i32** @c14, align 4, !tbaa !3
- store i32 %88, i32* %89, align 4, !tbaa !0
- %90 = load i32* @a15, align 4, !tbaa !0
- %91 = load i32** @c15, align 4, !tbaa !3
- store i32 %90, i32* %91, align 4, !tbaa !0
- %92 = load i32* @a16, align 4, !tbaa !0
- %93 = load i32** @c16, align 4, !tbaa !3
- store i32 %92, i32* %93, align 4, !tbaa !0
- %94 = load i32* @a17, align 4, !tbaa !0
- %95 = load i32** @c17, align 4, !tbaa !3
- store i32 %94, i32* %95, align 4, !tbaa !0
- %96 = load i32* @a18, align 4, !tbaa !0
- %97 = load i32** @c18, align 4, !tbaa !3
- store i32 %96, i32* %97, align 4, !tbaa !0
- %98 = load i32* @a19, align 4, !tbaa !0
- %99 = load i32** @c19, align 4, !tbaa !3
- store i32 %98, i32* %99, align 4, !tbaa !0
- %100 = load i32* @a20, align 4, !tbaa !0
- %101 = load i32** @c20, align 4, !tbaa !3
- store i32 %100, i32* %101, align 4, !tbaa !0
- %102 = load i32* @a21, align 4, !tbaa !0
- %103 = load i32** @c21, align 4, !tbaa !3
- store i32 %102, i32* %103, align 4, !tbaa !0
- %104 = load i32* @a22, align 4, !tbaa !0
- %105 = load i32** @c22, align 4, !tbaa !3
- store i32 %104, i32* %105, align 4, !tbaa !0
- %106 = load i32* @a23, align 4, !tbaa !0
- %107 = load i32** @c23, align 4, !tbaa !3
- store i32 %106, i32* %107, align 4, !tbaa !0
- %108 = load i32* @a24, align 4, !tbaa !0
- %109 = load i32** @c24, align 4, !tbaa !3
- store i32 %108, i32* %109, align 4, !tbaa !0
- %110 = load i32* @a25, align 4, !tbaa !0
- %111 = load i32** @c25, align 4, !tbaa !3
- store i32 %110, i32* %111, align 4, !tbaa !0
- %112 = load i32* @a26, align 4, !tbaa !0
- %113 = load i32** @c26, align 4, !tbaa !3
- store i32 %112, i32* %113, align 4, !tbaa !0
- %114 = load i32* @a27, align 4, !tbaa !0
- %115 = load i32** @c27, align 4, !tbaa !3
- store i32 %114, i32* %115, align 4, !tbaa !0
- %116 = load i32* @a28, align 4, !tbaa !0
- %117 = load i32** @c28, align 4, !tbaa !3
- store i32 %116, i32* %117, align 4, !tbaa !0
- %118 = load i32* @a29, align 4, !tbaa !0
- %119 = load i32** @c29, align 4, !tbaa !3
- store i32 %118, i32* %119, align 4, !tbaa !0
- %120 = load i32* @a0, align 4, !tbaa !0
+ %0 = load i32* @a0, align 4
+ %1 = load i32** @b0, align 4
+ store i32 %0, i32* %1, align 4
+ %2 = load i32* @a1, align 4
+ %3 = load i32** @b1, align 4
+ store i32 %2, i32* %3, align 4
+ %4 = load i32* @a2, align 4
+ %5 = load i32** @b2, align 4
+ store i32 %4, i32* %5, align 4
+ %6 = load i32* @a3, align 4
+ %7 = load i32** @b3, align 4
+ store i32 %6, i32* %7, align 4
+ %8 = load i32* @a4, align 4
+ %9 = load i32** @b4, align 4
+ store i32 %8, i32* %9, align 4
+ %10 = load i32* @a5, align 4
+ %11 = load i32** @b5, align 4
+ store i32 %10, i32* %11, align 4
+ %12 = load i32* @a6, align 4
+ %13 = load i32** @b6, align 4
+ store i32 %12, i32* %13, align 4
+ %14 = load i32* @a7, align 4
+ %15 = load i32** @b7, align 4
+ store i32 %14, i32* %15, align 4
+ %16 = load i32* @a8, align 4
+ %17 = load i32** @b8, align 4
+ store i32 %16, i32* %17, align 4
+ %18 = load i32* @a9, align 4
+ %19 = load i32** @b9, align 4
+ store i32 %18, i32* %19, align 4
+ %20 = load i32* @a10, align 4
+ %21 = load i32** @b10, align 4
+ store i32 %20, i32* %21, align 4
+ %22 = load i32* @a11, align 4
+ %23 = load i32** @b11, align 4
+ store i32 %22, i32* %23, align 4
+ %24 = load i32* @a12, align 4
+ %25 = load i32** @b12, align 4
+ store i32 %24, i32* %25, align 4
+ %26 = load i32* @a13, align 4
+ %27 = load i32** @b13, align 4
+ store i32 %26, i32* %27, align 4
+ %28 = load i32* @a14, align 4
+ %29 = load i32** @b14, align 4
+ store i32 %28, i32* %29, align 4
+ %30 = load i32* @a15, align 4
+ %31 = load i32** @b15, align 4
+ store i32 %30, i32* %31, align 4
+ %32 = load i32* @a16, align 4
+ %33 = load i32** @b16, align 4
+ store i32 %32, i32* %33, align 4
+ %34 = load i32* @a17, align 4
+ %35 = load i32** @b17, align 4
+ store i32 %34, i32* %35, align 4
+ %36 = load i32* @a18, align 4
+ %37 = load i32** @b18, align 4
+ store i32 %36, i32* %37, align 4
+ %38 = load i32* @a19, align 4
+ %39 = load i32** @b19, align 4
+ store i32 %38, i32* %39, align 4
+ %40 = load i32* @a20, align 4
+ %41 = load i32** @b20, align 4
+ store i32 %40, i32* %41, align 4
+ %42 = load i32* @a21, align 4
+ %43 = load i32** @b21, align 4
+ store i32 %42, i32* %43, align 4
+ %44 = load i32* @a22, align 4
+ %45 = load i32** @b22, align 4
+ store i32 %44, i32* %45, align 4
+ %46 = load i32* @a23, align 4
+ %47 = load i32** @b23, align 4
+ store i32 %46, i32* %47, align 4
+ %48 = load i32* @a24, align 4
+ %49 = load i32** @b24, align 4
+ store i32 %48, i32* %49, align 4
+ %50 = load i32* @a25, align 4
+ %51 = load i32** @b25, align 4
+ store i32 %50, i32* %51, align 4
+ %52 = load i32* @a26, align 4
+ %53 = load i32** @b26, align 4
+ store i32 %52, i32* %53, align 4
+ %54 = load i32* @a27, align 4
+ %55 = load i32** @b27, align 4
+ store i32 %54, i32* %55, align 4
+ %56 = load i32* @a28, align 4
+ %57 = load i32** @b28, align 4
+ store i32 %56, i32* %57, align 4
+ %58 = load i32* @a29, align 4
+ %59 = load i32** @b29, align 4
+ store i32 %58, i32* %59, align 4
+ %60 = load i32* @a0, align 4
+ %61 = load i32** @c0, align 4
+ store i32 %60, i32* %61, align 4
+ %62 = load i32* @a1, align 4
+ %63 = load i32** @c1, align 4
+ store i32 %62, i32* %63, align 4
+ %64 = load i32* @a2, align 4
+ %65 = load i32** @c2, align 4
+ store i32 %64, i32* %65, align 4
+ %66 = load i32* @a3, align 4
+ %67 = load i32** @c3, align 4
+ store i32 %66, i32* %67, align 4
+ %68 = load i32* @a4, align 4
+ %69 = load i32** @c4, align 4
+ store i32 %68, i32* %69, align 4
+ %70 = load i32* @a5, align 4
+ %71 = load i32** @c5, align 4
+ store i32 %70, i32* %71, align 4
+ %72 = load i32* @a6, align 4
+ %73 = load i32** @c6, align 4
+ store i32 %72, i32* %73, align 4
+ %74 = load i32* @a7, align 4
+ %75 = load i32** @c7, align 4
+ store i32 %74, i32* %75, align 4
+ %76 = load i32* @a8, align 4
+ %77 = load i32** @c8, align 4
+ store i32 %76, i32* %77, align 4
+ %78 = load i32* @a9, align 4
+ %79 = load i32** @c9, align 4
+ store i32 %78, i32* %79, align 4
+ %80 = load i32* @a10, align 4
+ %81 = load i32** @c10, align 4
+ store i32 %80, i32* %81, align 4
+ %82 = load i32* @a11, align 4
+ %83 = load i32** @c11, align 4
+ store i32 %82, i32* %83, align 4
+ %84 = load i32* @a12, align 4
+ %85 = load i32** @c12, align 4
+ store i32 %84, i32* %85, align 4
+ %86 = load i32* @a13, align 4
+ %87 = load i32** @c13, align 4
+ store i32 %86, i32* %87, align 4
+ %88 = load i32* @a14, align 4
+ %89 = load i32** @c14, align 4
+ store i32 %88, i32* %89, align 4
+ %90 = load i32* @a15, align 4
+ %91 = load i32** @c15, align 4
+ store i32 %90, i32* %91, align 4
+ %92 = load i32* @a16, align 4
+ %93 = load i32** @c16, align 4
+ store i32 %92, i32* %93, align 4
+ %94 = load i32* @a17, align 4
+ %95 = load i32** @c17, align 4
+ store i32 %94, i32* %95, align 4
+ %96 = load i32* @a18, align 4
+ %97 = load i32** @c18, align 4
+ store i32 %96, i32* %97, align 4
+ %98 = load i32* @a19, align 4
+ %99 = load i32** @c19, align 4
+ store i32 %98, i32* %99, align 4
+ %100 = load i32* @a20, align 4
+ %101 = load i32** @c20, align 4
+ store i32 %100, i32* %101, align 4
+ %102 = load i32* @a21, align 4
+ %103 = load i32** @c21, align 4
+ store i32 %102, i32* %103, align 4
+ %104 = load i32* @a22, align 4
+ %105 = load i32** @c22, align 4
+ store i32 %104, i32* %105, align 4
+ %106 = load i32* @a23, align 4
+ %107 = load i32** @c23, align 4
+ store i32 %106, i32* %107, align 4
+ %108 = load i32* @a24, align 4
+ %109 = load i32** @c24, align 4
+ store i32 %108, i32* %109, align 4
+ %110 = load i32* @a25, align 4
+ %111 = load i32** @c25, align 4
+ store i32 %110, i32* %111, align 4
+ %112 = load i32* @a26, align 4
+ %113 = load i32** @c26, align 4
+ store i32 %112, i32* %113, align 4
+ %114 = load i32* @a27, align 4
+ %115 = load i32** @c27, align 4
+ store i32 %114, i32* %115, align 4
+ %116 = load i32* @a28, align 4
+ %117 = load i32** @c28, align 4
+ store i32 %116, i32* %117, align 4
+ %118 = load i32* @a29, align 4
+ %119 = load i32** @c29, align 4
+ store i32 %118, i32* %119, align 4
+ %120 = load i32* @a0, align 4
ret i32 %120
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
diff --git a/test/CodeGen/Mips/rotate.ll b/test/CodeGen/Mips/rotate.ll
index 4f3cfb7..813bbdf 100644
--- a/test/CodeGen/Mips/rotate.ll
+++ b/test/CodeGen/Mips/rotate.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=mips16
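+; MIPS16 has no rotate instructions, so the mips16 run only checks that
+; each function is emitted (.ent); the rotate is presumably expanded into
+; a shift/or sequence there.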
; CHECK: rotrv $2, $4
+; mips16: .ent rot0
define i32 @rot0(i32 %a, i32 %b) nounwind readnone {
entry:
%shl = shl i32 %a, %b
@@ -11,6 +13,7 @@ entry:
}
; CHECK: rotr $2, $4, 22
+; mips16: .ent rot1
define i32 @rot1(i32 %a) nounwind readnone {
entry:
%shl = shl i32 %a, 10
@@ -20,6 +23,7 @@ entry:
}
; CHECK: rotrv $2, $4, $5
+; mips16: .ent rot2
define i32 @rot2(i32 %a, i32 %b) nounwind readnone {
entry:
%shr = lshr i32 %a, %b
@@ -30,6 +34,7 @@ entry:
}
; CHECK: rotr $2, $4, 10
+; mips16: .ent rot3
define i32 @rot3(i32 %a) nounwind readnone {
entry:
%shr = lshr i32 %a, 10
diff --git a/test/CodeGen/Mips/sel1c.ll b/test/CodeGen/Mips/sel1c.ll
new file mode 100644
index 0000000..4c4784d
--- /dev/null
+++ b/test/CodeGen/Mips/sel1c.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=cond-b-short
+
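+; With -mips16-constant-islands the conditional branch may be emitted in
+; its short 16-bit bteqz form when the target is known to be in range; the
+; "# 16 bit inst" annotation in the output asserts the short encoding.
+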
+@i = global i32 1, align 4
+@j = global i32 2, align 4
+@k = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define void @t() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %1 = load i32* @j, align 4
+ %cmp = icmp eq i32 %0, %1
+ %cond = select i1 %cmp, i32 1, i32 2
+ store i32 %cond, i32* @k, align 4
+ ret void
+; cond-b-short: bteqz $BB0_{{[0-9]+}} # 16 bit inst
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
diff --git a/test/CodeGen/Mips/sel2c.ll b/test/CodeGen/Mips/sel2c.ll
new file mode 100644
index 0000000..25dfaa9
--- /dev/null
+++ b/test/CodeGen/Mips/sel2c.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=cond-b-short
+
+@i = global i32 1, align 4
+@j = global i32 2, align 4
+@k = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define void @t() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %1 = load i32* @j, align 4
+ %cmp = icmp ne i32 %0, %1
+ %cond = select i1 %cmp, i32 1, i32 2
+ store i32 %cond, i32* @k, align 4
+; cond-b-short: btnez $BB0_{{[0-9]+}} # 16 bit inst
+ ret void
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
diff --git a/test/CodeGen/Mips/simplebr.ll b/test/CodeGen/Mips/simplebr.ll
new file mode 100644
index 0000000..a1d6367
--- /dev/null
+++ b/test/CodeGen/Mips/simplebr.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s -check-prefix=CHECK-STATIC16
+
+; ModuleID = 'simplebr.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@i = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @foo() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ call void bitcast (void (...)* @goo to void ()*)()
+ br label %if.end
+
+if.else: ; preds = %entry
+ call void bitcast (void (...)* @hoo to void ()*)()
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void
+}
+
+; CHECK-STATIC16: b $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
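+;
+; In static mips16 mode the unconditional branch over the other arm can
+; use the short 16-bit b encoding; the "# 16 bit inst" annotation asserts
+; that the short form was chosen.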
+
+declare void @goo(...) #1
+
+declare void @hoo(...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
diff --git a/test/CodeGen/Mips/stack-alignment.ll b/test/CodeGen/Mips/stack-alignment.ll
new file mode 100644
index 0000000..b18f966
--- /dev/null
+++ b/test/CodeGen/Mips/stack-alignment.ll
@@ -0,0 +1,13 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
+
+; 32: addiu $sp, $sp, -8
+; 64: addiu $sp, $sp, -16
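+;
+; O32 requires 8-byte stack alignment while N32/N64 require 16 bytes, so
+; even this leaf function (with frame-pointer elimination disabled) gets
+; the ABI-minimum stack adjustment.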
+
+define i32 @foo1() #0 {
+entry:
+ ret i32 14
+}
+
+attributes #0 = { "no-frame-pointer-elim"="true" }
diff --git a/test/CodeGen/Mips/tailcall.ll b/test/CodeGen/Mips/tailcall.ll
index bcd33fc..30f47ab 100644
--- a/test/CodeGen/Mips/tailcall.ll
+++ b/test/CodeGen/Mips/tailcall.ll
@@ -243,3 +243,16 @@ entry:
ret i32 %call
}
+; Check that there is a chain edge between the load and store nodes.
+;
+; PIC32-LABEL: caller14:
+; PIC32: lw ${{[0-9]+}}, 16($sp)
+; PIC32: sw $4, 16($sp)
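+;
+; The fifth argument %a is stored to its outgoing slot at 16($sp), which is
+; also the incoming slot of %e; without the chain edge the store could be
+; scheduled before the load and clobber %e.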
+
+define void @caller14(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+entry:
+ tail call void @callee14(i32 %e, i32 %b, i32 %c, i32 %d, i32 %a)
+ ret void
+}
+
+declare void @callee14(i32, i32, i32, i32, i32)
diff --git a/test/CodeGen/Mips/tnaked.ll b/test/CodeGen/Mips/tnaked.ll
index edf1ecf..08f1ab5 100644
--- a/test/CodeGen/Mips/tnaked.ll
+++ b/test/CodeGen/Mips/tnaked.ll
@@ -25,5 +25,5 @@ entry:
; CHECK: .fmask 0x00000000,0
; CHECK: addiu $sp, $sp, -8
-attributes #0 = { naked noinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { naked noinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }