Diffstat (limited to 'test/CodeGen/Mips/msa')
-rw-r--r--  test/CodeGen/Mips/msa/2r.ll | 24
-rw-r--r--  test/CodeGen/Mips/msa/2r_vector_scalar.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/2rf.ll | 32
-rw-r--r--  test/CodeGen/Mips/msa/2rf_exup.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/2rf_float_int.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/2rf_fq.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/2rf_int_float.ll | 20
-rw-r--r--  test/CodeGen/Mips/msa/2rf_tq.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/3r-a.ll | 192
-rw-r--r--  test/CodeGen/Mips/msa/3r-b.ll | 96
-rw-r--r--  test/CodeGen/Mips/msa/3r-c.ll | 80
-rw-r--r--  test/CodeGen/Mips/msa/3r-d.ll | 88
-rw-r--r--  test/CodeGen/Mips/msa/3r-i.ll | 64
-rw-r--r--  test/CodeGen/Mips/msa/3r-m.ll | 160
-rw-r--r--  test/CodeGen/Mips/msa/3r-p.ll | 32
-rw-r--r--  test/CodeGen/Mips/msa/3r-s.ll | 248
-rw-r--r--  test/CodeGen/Mips/msa/3r-v.ll | 24
-rw-r--r--  test/CodeGen/Mips/msa/3r_4r.ll | 48
-rw-r--r--  test/CodeGen/Mips/msa/3r_4r_widen.ll | 72
-rw-r--r--  test/CodeGen/Mips/msa/3r_splat.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/3rf.ll | 96
-rw-r--r--  test/CodeGen/Mips/msa/3rf_4rf.ll | 24
-rw-r--r--  test/CodeGen/Mips/msa/3rf_4rf_q.ll | 48
-rw-r--r--  test/CodeGen/Mips/msa/3rf_exdo.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/3rf_float_int.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/3rf_int_float.ll | 176
-rw-r--r--  test/CodeGen/Mips/msa/3rf_q.ll | 16
-rw-r--r--  test/CodeGen/Mips/msa/arithmetic.ll | 176
-rw-r--r--  test/CodeGen/Mips/msa/arithmetic_float.ll | 88
-rw-r--r--  test/CodeGen/Mips/msa/basic_operations.ll | 72
-rw-r--r--  test/CodeGen/Mips/msa/basic_operations_float.ll | 34
-rw-r--r--  test/CodeGen/Mips/msa/bit.ll | 56
-rw-r--r--  test/CodeGen/Mips/msa/bitcast.ll | 98
-rw-r--r--  test/CodeGen/Mips/msa/bitwise.ll | 310
-rw-r--r--  test/CodeGen/Mips/msa/compare.ll | 408
-rw-r--r--  test/CodeGen/Mips/msa/compare_float.ll | 156
-rw-r--r--  test/CodeGen/Mips/msa/elm_copy.ll | 16
-rw-r--r--  test/CodeGen/Mips/msa/elm_insv.ll | 32
-rw-r--r--  test/CodeGen/Mips/msa/elm_move.ll | 2
-rw-r--r--  test/CodeGen/Mips/msa/elm_shift_slide.ll | 24
-rw-r--r--  test/CodeGen/Mips/msa/frameindex.ll | 58
-rw-r--r--  test/CodeGen/Mips/msa/i10.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/i5-a.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/i5-b.ll | 56
-rw-r--r--  test/CodeGen/Mips/msa/i5-c.ll | 40
-rw-r--r--  test/CodeGen/Mips/msa/i5-m.ll | 32
-rw-r--r--  test/CodeGen/Mips/msa/i5-s.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/i5_ld_st.ll | 8
-rw-r--r--  test/CodeGen/Mips/msa/i8.ll | 26
-rw-r--r--  test/CodeGen/Mips/msa/inline-asm.ll | 4
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll | 22
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll | 22
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll | 22
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll | 22
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll | 22
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll | 22
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s525530439.ll | 22
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s997348632.ll | 22
-rw-r--r--  test/CodeGen/Mips/msa/shuffle.ll | 166
-rw-r--r--  test/CodeGen/Mips/msa/spill.ll | 536
-rw-r--r--  test/CodeGen/Mips/msa/vec.ll | 184
-rw-r--r--  test/CodeGen/Mips/msa/vecs10.ll | 4
62 files changed, 2195 insertions(+), 2195 deletions(-)
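
Every hunk below makes the same mechanical update: the tests are migrated to
LLVM's explicit-pointee-type syntax for the load instruction, in which the
loaded type is written as a separate first operand rather than being inferred
from the pointer operand's type. A minimal sketch of the before/after shape
(the global @g and function @load_syntax_sketch here are hypothetical
illustrations, not taken from this patch):

@g = global <16 x i8> zeroinitializer

define <16 x i8> @load_syntax_sketch() {
entry:
  ; old form, result type inferred from the pointer:
  ;   %v = load <16 x i8>* @g
  ; new form, loaded type spelled as an explicit operand:
  %v = load <16 x i8>, <16 x i8>* @g
  ret <16 x i8> %v
}

Because only the load lines change, every file shows paired -/+ lines and the
insertion and deletion counts above match exactly.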
diff --git a/test/CodeGen/Mips/msa/2r.ll b/test/CodeGen/Mips/msa/2r.ll
index da35ad8..501936c 100644
--- a/test/CodeGen/Mips/msa/2r.ll
+++ b/test/CodeGen/Mips/msa/2r.ll
@@ -8,7 +8,7 @@
define void @llvm_mips_nloc_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_nloc_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nloc_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.nloc.b(<16 x i8> %0)
store <16 x i8> %1, <16 x i8>* @llvm_mips_nloc_b_RES
ret void
@@ -29,7 +29,7 @@ declare <16 x i8> @llvm.mips.nloc.b(<16 x i8>) nounwind
define void @llvm_mips_nloc_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_nloc_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nloc_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.nloc.h(<8 x i16> %0)
store <8 x i16> %1, <8 x i16>* @llvm_mips_nloc_h_RES
ret void
@@ -50,7 +50,7 @@ declare <8 x i16> @llvm.mips.nloc.h(<8 x i16>) nounwind
define void @llvm_mips_nloc_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_nloc_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nloc_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.nloc.w(<4 x i32> %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_nloc_w_RES
ret void
@@ -71,7 +71,7 @@ declare <4 x i32> @llvm.mips.nloc.w(<4 x i32>) nounwind
define void @llvm_mips_nloc_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_nloc_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nloc_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.nloc.d(<2 x i64> %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_nloc_d_RES
ret void
@@ -92,7 +92,7 @@ declare <2 x i64> @llvm.mips.nloc.d(<2 x i64>) nounwind
define void @llvm_mips_nlzc_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_nlzc_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nlzc_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.nlzc.b(<16 x i8> %0)
store <16 x i8> %1, <16 x i8>* @llvm_mips_nlzc_b_RES
ret void
@@ -113,7 +113,7 @@ declare <16 x i8> @llvm.mips.nlzc.b(<16 x i8>) nounwind
define void @llvm_mips_nlzc_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_nlzc_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nlzc_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.nlzc.h(<8 x i16> %0)
store <8 x i16> %1, <8 x i16>* @llvm_mips_nlzc_h_RES
ret void
@@ -134,7 +134,7 @@ declare <8 x i16> @llvm.mips.nlzc.h(<8 x i16>) nounwind
define void @llvm_mips_nlzc_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_nlzc_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nlzc_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.nlzc.w(<4 x i32> %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_nlzc_w_RES
ret void
@@ -155,7 +155,7 @@ declare <4 x i32> @llvm.mips.nlzc.w(<4 x i32>) nounwind
define void @llvm_mips_nlzc_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_nlzc_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nlzc_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.nlzc.d(<2 x i64> %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_nlzc_d_RES
ret void
@@ -176,7 +176,7 @@ declare <2 x i64> @llvm.mips.nlzc.d(<2 x i64>) nounwind
define void @llvm_mips_pcnt_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_pcnt_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pcnt_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.pcnt.b(<16 x i8> %0)
store <16 x i8> %1, <16 x i8>* @llvm_mips_pcnt_b_RES
ret void
@@ -197,7 +197,7 @@ declare <16 x i8> @llvm.mips.pcnt.b(<16 x i8>) nounwind
define void @llvm_mips_pcnt_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_pcnt_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pcnt_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.pcnt.h(<8 x i16> %0)
store <8 x i16> %1, <8 x i16>* @llvm_mips_pcnt_h_RES
ret void
@@ -218,7 +218,7 @@ declare <8 x i16> @llvm.mips.pcnt.h(<8 x i16>) nounwind
define void @llvm_mips_pcnt_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_pcnt_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pcnt_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.pcnt.w(<4 x i32> %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_pcnt_w_RES
ret void
@@ -239,7 +239,7 @@ declare <4 x i32> @llvm.mips.pcnt.w(<4 x i32>) nounwind
define void @llvm_mips_pcnt_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_pcnt_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pcnt_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.pcnt.d(<2 x i64> %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_pcnt_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/2r_vector_scalar.ll b/test/CodeGen/Mips/msa/2r_vector_scalar.ll
index 64e459e..ddcd3cf 100644
--- a/test/CodeGen/Mips/msa/2r_vector_scalar.ll
+++ b/test/CodeGen/Mips/msa/2r_vector_scalar.ll
@@ -15,7 +15,7 @@
define void @llvm_mips_fill_b_test() nounwind {
entry:
- %0 = load i32* @llvm_mips_fill_b_ARG1
+ %0 = load i32, i32* @llvm_mips_fill_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.fill.b(i32 %0)
store <16 x i8> %1, <16 x i8>* @llvm_mips_fill_b_RES
ret void
@@ -35,7 +35,7 @@ declare <16 x i8> @llvm.mips.fill.b(i32) nounwind
define void @llvm_mips_fill_h_test() nounwind {
entry:
- %0 = load i32* @llvm_mips_fill_h_ARG1
+ %0 = load i32, i32* @llvm_mips_fill_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.fill.h(i32 %0)
store <8 x i16> %1, <8 x i16>* @llvm_mips_fill_h_RES
ret void
@@ -55,7 +55,7 @@ declare <8 x i16> @llvm.mips.fill.h(i32) nounwind
define void @llvm_mips_fill_w_test() nounwind {
entry:
- %0 = load i32* @llvm_mips_fill_w_ARG1
+ %0 = load i32, i32* @llvm_mips_fill_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.fill.w(i32 %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_fill_w_RES
ret void
@@ -75,7 +75,7 @@ declare <4 x i32> @llvm.mips.fill.w(i32) nounwind
define void @llvm_mips_fill_d_test() nounwind {
entry:
- %0 = load i64* @llvm_mips_fill_d_ARG1
+ %0 = load i64, i64* @llvm_mips_fill_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.fill.d(i64 %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_fill_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/2rf.ll b/test/CodeGen/Mips/msa/2rf.ll
index b361ef5..1dbfbda 100644
--- a/test/CodeGen/Mips/msa/2rf.ll
+++ b/test/CodeGen/Mips/msa/2rf.ll
@@ -8,7 +8,7 @@
define void @llvm_mips_flog2_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1
%1 = tail call <4 x float> @llvm.mips.flog2.w(<4 x float> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
ret void
@@ -29,7 +29,7 @@ declare <4 x float> @llvm.mips.flog2.w(<4 x float>) nounwind
define void @llvm_mips_flog2_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1
%1 = tail call <2 x double> @llvm.mips.flog2.d(<2 x double> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
ret void
@@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.flog2.d(<2 x double>) nounwind
define void @flog2_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1
%1 = tail call <4 x float> @llvm.log2.v4f32(<4 x float> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
ret void
@@ -65,7 +65,7 @@ declare <4 x float> @llvm.log2.v4f32(<4 x float> %val)
define void @flog2_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1
%1 = tail call <2 x double> @llvm.log2.v2f64(<2 x double> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
ret void
@@ -86,7 +86,7 @@ declare <2 x double> @llvm.log2.v2f64(<2 x double> %val)
define void @llvm_mips_frint_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_frint_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1
%1 = tail call <4 x float> @llvm.mips.frint.w(<4 x float> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
ret void
@@ -107,7 +107,7 @@ declare <4 x float> @llvm.mips.frint.w(<4 x float>) nounwind
define void @llvm_mips_frint_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_frint_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1
%1 = tail call <2 x double> @llvm.mips.frint.d(<2 x double> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
ret void
@@ -125,7 +125,7 @@ declare <2 x double> @llvm.mips.frint.d(<2 x double>) nounwind
define void @frint_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_frint_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1
%1 = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
ret void
@@ -143,7 +143,7 @@ declare <4 x float> @llvm.rint.v4f32(<4 x float>) nounwind
define void @frint_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_frint_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1
%1 = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
ret void
@@ -164,7 +164,7 @@ declare <2 x double> @llvm.rint.v2f64(<2 x double>) nounwind
define void @llvm_mips_frcp_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_frcp_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_frcp_w_ARG1
%1 = tail call <4 x float> @llvm.mips.frcp.w(<4 x float> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_frcp_w_RES
ret void
@@ -185,7 +185,7 @@ declare <4 x float> @llvm.mips.frcp.w(<4 x float>) nounwind
define void @llvm_mips_frcp_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_frcp_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_frcp_d_ARG1
%1 = tail call <2 x double> @llvm.mips.frcp.d(<2 x double> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_frcp_d_RES
ret void
@@ -206,7 +206,7 @@ declare <2 x double> @llvm.mips.frcp.d(<2 x double>) nounwind
define void @llvm_mips_frsqrt_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_frsqrt_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_frsqrt_w_ARG1
%1 = tail call <4 x float> @llvm.mips.frsqrt.w(<4 x float> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_frsqrt_w_RES
ret void
@@ -227,7 +227,7 @@ declare <4 x float> @llvm.mips.frsqrt.w(<4 x float>) nounwind
define void @llvm_mips_frsqrt_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_frsqrt_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_frsqrt_d_ARG1
%1 = tail call <2 x double> @llvm.mips.frsqrt.d(<2 x double> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_frsqrt_d_RES
ret void
@@ -248,7 +248,7 @@ declare <2 x double> @llvm.mips.frsqrt.d(<2 x double>) nounwind
define void @llvm_mips_fsqrt_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1
%1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
ret void
@@ -269,7 +269,7 @@ declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>) nounwind
define void @llvm_mips_fsqrt_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1
%1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
ret void
@@ -287,7 +287,7 @@ declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>) nounwind
define void @fsqrt_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1
%1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
ret void
@@ -305,7 +305,7 @@ declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind
define void @fsqrt_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1
%1 = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/2rf_exup.ll b/test/CodeGen/Mips/msa/2rf_exup.ll
index 8d7cc36..fd81ff6 100644
--- a/test/CodeGen/Mips/msa/2rf_exup.ll
+++ b/test/CodeGen/Mips/msa/2rf_exup.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_fexupl_w_test() nounwind {
entry:
- %0 = load <8 x half>* @llvm_mips_fexupl_w_ARG1
+ %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupl_w_ARG1
%1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_fexupl_w_RES
ret void
@@ -28,7 +28,7 @@ declare <4 x float> @llvm.mips.fexupl.w(<8 x half>) nounwind
define void @llvm_mips_fexupl_d_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fexupl_d_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupl_d_ARG1
%1 = tail call <2 x double> @llvm.mips.fexupl.d(<4 x float> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_fexupl_d_RES
ret void
@@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.fexupl.d(<4 x float>) nounwind
define void @llvm_mips_fexupr_w_test() nounwind {
entry:
- %0 = load <8 x half>* @llvm_mips_fexupr_w_ARG1
+ %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupr_w_ARG1
%1 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_fexupr_w_RES
ret void
@@ -66,7 +66,7 @@ declare <4 x float> @llvm.mips.fexupr.w(<8 x half>) nounwind
define void @llvm_mips_fexupr_d_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fexupr_d_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupr_d_ARG1
%1 = tail call <2 x double> @llvm.mips.fexupr.d(<4 x float> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_fexupr_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/2rf_float_int.ll b/test/CodeGen/Mips/msa/2rf_float_int.ll
index 3b5dfda..3690158 100644
--- a/test/CodeGen/Mips/msa/2rf_float_int.ll
+++ b/test/CodeGen/Mips/msa/2rf_float_int.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_ffint_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ffint_s_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_s_w_ARG1
%1 = tail call <4 x float> @llvm.mips.ffint.s.w(<4 x i32> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_ffint_s_w_RES
ret void
@@ -30,7 +30,7 @@ declare <4 x float> @llvm.mips.ffint.s.w(<4 x i32>) nounwind
define void @llvm_mips_ffint_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ffint_s_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_s_d_ARG1
%1 = tail call <2 x double> @llvm.mips.ffint.s.d(<2 x i64> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_ffint_s_d_RES
ret void
@@ -51,7 +51,7 @@ declare <2 x double> @llvm.mips.ffint.s.d(<2 x i64>) nounwind
define void @llvm_mips_ffint_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ffint_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_u_w_ARG1
%1 = tail call <4 x float> @llvm.mips.ffint.u.w(<4 x i32> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_ffint_u_w_RES
ret void
@@ -72,7 +72,7 @@ declare <4 x float> @llvm.mips.ffint.u.w(<4 x i32>) nounwind
define void @llvm_mips_ffint_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ffint_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_u_d_ARG1
%1 = tail call <2 x double> @llvm.mips.ffint.u.d(<2 x i64> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_ffint_u_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/2rf_fq.ll b/test/CodeGen/Mips/msa/2rf_fq.ll
index 021dd93..05c649e 100644
--- a/test/CodeGen/Mips/msa/2rf_fq.ll
+++ b/test/CodeGen/Mips/msa/2rf_fq.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_ffql_w_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ffql_w_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffql_w_ARG1
%1 = tail call <4 x float> @llvm.mips.ffql.w(<8 x i16> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_ffql_w_RES
ret void
@@ -28,7 +28,7 @@ declare <4 x float> @llvm.mips.ffql.w(<8 x i16>) nounwind
define void @llvm_mips_ffql_d_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ffql_d_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffql_d_ARG1
%1 = tail call <2 x double> @llvm.mips.ffql.d(<4 x i32> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_ffql_d_RES
ret void
@@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.ffql.d(<4 x i32>) nounwind
define void @llvm_mips_ffqr_w_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ffqr_w_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffqr_w_ARG1
%1 = tail call <4 x float> @llvm.mips.ffqr.w(<8 x i16> %0)
store <4 x float> %1, <4 x float>* @llvm_mips_ffqr_w_RES
ret void
@@ -66,7 +66,7 @@ declare <4 x float> @llvm.mips.ffqr.w(<8 x i16>) nounwind
define void @llvm_mips_ffqr_d_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ffqr_d_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffqr_d_ARG1
%1 = tail call <2 x double> @llvm.mips.ffqr.d(<4 x i32> %0)
store <2 x double> %1, <2 x double>* @llvm_mips_ffqr_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/2rf_int_float.ll b/test/CodeGen/Mips/msa/2rf_int_float.ll
index 4665ae0..77d1404 100644
--- a/test/CodeGen/Mips/msa/2rf_int_float.ll
+++ b/test/CodeGen/Mips/msa/2rf_int_float.ll
@@ -10,7 +10,7 @@
define void @llvm_mips_fclass_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fclass_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fclass_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.fclass.w(<4 x float> %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_fclass_w_RES
ret void
@@ -31,7 +31,7 @@ declare <4 x i32> @llvm.mips.fclass.w(<4 x float>) nounwind
define void @llvm_mips_fclass_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fclass_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fclass_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.fclass.d(<2 x double> %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_fclass_d_RES
ret void
@@ -52,7 +52,7 @@ declare <2 x i64> @llvm.mips.fclass.d(<2 x double>) nounwind
define void @llvm_mips_ftrunc_s_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_ftrunc_s_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float> %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_s_w_RES
ret void
@@ -73,7 +73,7 @@ declare <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float>) nounwind
define void @llvm_mips_ftrunc_s_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_ftrunc_s_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double> %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_s_d_RES
ret void
@@ -94,7 +94,7 @@ declare <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double>) nounwind
define void @llvm_mips_ftrunc_u_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_ftrunc_u_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float> %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_u_w_RES
ret void
@@ -115,7 +115,7 @@ declare <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float>) nounwind
define void @llvm_mips_ftrunc_u_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_ftrunc_u_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double> %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_u_d_RES
ret void
@@ -136,7 +136,7 @@ declare <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double>) nounwind
define void @llvm_mips_ftint_s_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_ftint_s_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ftint.s.w(<4 x float> %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_s_w_RES
ret void
@@ -157,7 +157,7 @@ declare <4 x i32> @llvm.mips.ftint.s.w(<4 x float>) nounwind
define void @llvm_mips_ftint_s_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_ftint_s_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ftint.s.d(<2 x double> %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_s_d_RES
ret void
@@ -178,7 +178,7 @@ declare <2 x i64> @llvm.mips.ftint.s.d(<2 x double>) nounwind
define void @llvm_mips_ftint_u_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_ftint_u_w_ARG1
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ftint.u.w(<4 x float> %0)
store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_u_w_RES
ret void
@@ -199,7 +199,7 @@ declare <4 x i32> @llvm.mips.ftint.u.w(<4 x float>) nounwind
define void @llvm_mips_ftint_u_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_ftint_u_d_ARG1
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ftint.u.d(<2 x double> %0)
store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_u_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/2rf_tq.ll b/test/CodeGen/Mips/msa/2rf_tq.ll
index 6f3c508..9b7f02a 100644
--- a/test/CodeGen/Mips/msa/2rf_tq.ll
+++ b/test/CodeGen/Mips/msa/2rf_tq.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_ftq_h_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_ftq_h_ARG1
- %1 = load <4 x float>* @llvm_mips_ftq_h_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ftq.h(<4 x float> %0, <4 x float> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_ftq_h_RES
ret void
@@ -32,8 +32,8 @@ declare <8 x i16> @llvm.mips.ftq.h(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_ftq_w_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_ftq_w_ARG1
- %1 = load <2 x double>* @llvm_mips_ftq_w_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ftq.w(<2 x double> %0, <2 x double> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_ftq_w_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r-a.ll b/test/CodeGen/Mips/msa/3r-a.ll
index dab15b6..db772f9 100644
--- a/test/CodeGen/Mips/msa/3r-a.ll
+++ b/test/CodeGen/Mips/msa/3r-a.ll
@@ -15,8 +15,8 @@
define void @llvm_mips_add_a_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_add_a_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_add_a_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES
ret void
@@ -40,8 +40,8 @@ declare <16 x i8> @llvm.mips.add.a.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_add_a_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_add_a_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_add_a_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES
ret void
@@ -65,8 +65,8 @@ declare <8 x i16> @llvm.mips.add.a.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_add_a_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_add_a_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_add_a_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES
ret void
@@ -90,8 +90,8 @@ declare <4 x i32> @llvm.mips.add.a.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_add_a_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_add_a_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_add_a_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES
ret void
@@ -115,8 +115,8 @@ declare <2 x i64> @llvm.mips.add.a.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_adds_a_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_adds_a_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_adds_a_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES
ret void
@@ -140,8 +140,8 @@ declare <16 x i8> @llvm.mips.adds.a.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_adds_a_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_adds_a_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_adds_a_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES
ret void
@@ -165,8 +165,8 @@ declare <8 x i16> @llvm.mips.adds.a.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_adds_a_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_adds_a_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_adds_a_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_a_w_RES
ret void
@@ -190,8 +190,8 @@ declare <4 x i32> @llvm.mips.adds.a.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_adds_a_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_adds_a_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_adds_a_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES
ret void
@@ -215,8 +215,8 @@ declare <2 x i64> @llvm.mips.adds.a.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_adds_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_adds_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_adds_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES
ret void
@@ -240,8 +240,8 @@ declare <16 x i8> @llvm.mips.adds.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_adds_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_adds_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_adds_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES
ret void
@@ -265,8 +265,8 @@ declare <8 x i16> @llvm.mips.adds.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_adds_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_adds_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_adds_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES
ret void
@@ -290,8 +290,8 @@ declare <4 x i32> @llvm.mips.adds.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_adds_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_adds_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_adds_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES
ret void
@@ -315,8 +315,8 @@ declare <2 x i64> @llvm.mips.adds.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_adds_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_adds_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_adds_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES
ret void
@@ -340,8 +340,8 @@ declare <16 x i8> @llvm.mips.adds.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_adds_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_adds_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_adds_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES
ret void
@@ -365,8 +365,8 @@ declare <8 x i16> @llvm.mips.adds.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_adds_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_adds_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_adds_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES
ret void
@@ -390,8 +390,8 @@ declare <4 x i32> @llvm.mips.adds.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_adds_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_adds_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_adds_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES
ret void
@@ -415,8 +415,8 @@ declare <2 x i64> @llvm.mips.adds.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_addv_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
ret void
@@ -440,8 +440,8 @@ declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_addv_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
ret void
@@ -465,8 +465,8 @@ declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_addv_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
ret void
@@ -490,8 +490,8 @@ declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_addv_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
ret void
@@ -512,8 +512,8 @@ declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind
define void @addv_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2
%2 = add <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
ret void
@@ -532,8 +532,8 @@ entry:
define void @addv_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2
%2 = add <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
ret void
@@ -552,8 +552,8 @@ entry:
define void @addv_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2
%2 = add <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
ret void
@@ -572,8 +572,8 @@ entry:
define void @addv_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2
%2 = add <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
ret void
@@ -595,8 +595,8 @@ entry:
define void @llvm_mips_asub_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_asub_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_asub_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES
ret void
@@ -620,8 +620,8 @@ declare <16 x i8> @llvm.mips.asub.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_asub_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_asub_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_asub_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES
ret void
@@ -645,8 +645,8 @@ declare <8 x i16> @llvm.mips.asub.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_asub_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_asub_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_asub_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES
ret void
@@ -670,8 +670,8 @@ declare <4 x i32> @llvm.mips.asub.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_asub_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_asub_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_asub_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES
ret void
@@ -695,8 +695,8 @@ declare <2 x i64> @llvm.mips.asub.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_asub_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_asub_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_asub_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES
ret void
@@ -720,8 +720,8 @@ declare <16 x i8> @llvm.mips.asub.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_asub_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_asub_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_asub_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES
ret void
@@ -745,8 +745,8 @@ declare <8 x i16> @llvm.mips.asub.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_asub_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_asub_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_asub_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES
ret void
@@ -770,8 +770,8 @@ declare <4 x i32> @llvm.mips.asub.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_asub_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_asub_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_asub_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES
ret void
@@ -795,8 +795,8 @@ declare <2 x i64> @llvm.mips.asub.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_ave_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ave_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ave_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES
ret void
@@ -820,8 +820,8 @@ declare <16 x i8> @llvm.mips.ave.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_ave_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ave_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ave_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES
ret void
@@ -845,8 +845,8 @@ declare <8 x i16> @llvm.mips.ave.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_ave_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ave_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ave_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES
ret void
@@ -870,8 +870,8 @@ declare <4 x i32> @llvm.mips.ave.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_ave_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ave_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ave_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES
ret void
@@ -895,8 +895,8 @@ declare <2 x i64> @llvm.mips.ave.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_ave_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ave_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ave_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES
ret void
@@ -920,8 +920,8 @@ declare <16 x i8> @llvm.mips.ave.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_ave_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ave_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ave_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES
ret void
@@ -945,8 +945,8 @@ declare <8 x i16> @llvm.mips.ave.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_ave_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ave_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ave_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES
ret void
@@ -970,8 +970,8 @@ declare <4 x i32> @llvm.mips.ave.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_ave_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ave_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ave_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES
ret void
@@ -995,8 +995,8 @@ declare <2 x i64> @llvm.mips.ave.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_aver_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_aver_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_aver_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES
ret void
@@ -1020,8 +1020,8 @@ declare <16 x i8> @llvm.mips.aver.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_aver_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_aver_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_aver_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES
ret void
@@ -1045,8 +1045,8 @@ declare <8 x i16> @llvm.mips.aver.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_aver_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_aver_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_aver_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES
ret void
@@ -1070,8 +1070,8 @@ declare <4 x i32> @llvm.mips.aver.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_aver_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_aver_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_aver_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_s_d_RES
ret void
@@ -1095,8 +1095,8 @@ declare <2 x i64> @llvm.mips.aver.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_aver_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_aver_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_aver_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES
ret void
@@ -1120,8 +1120,8 @@ declare <16 x i8> @llvm.mips.aver.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_aver_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_aver_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_aver_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES
ret void
@@ -1145,8 +1145,8 @@ declare <8 x i16> @llvm.mips.aver.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_aver_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_aver_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_aver_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES
ret void
@@ -1170,8 +1170,8 @@ declare <4 x i32> @llvm.mips.aver.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_aver_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_aver_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_aver_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r-b.ll b/test/CodeGen/Mips/msa/3r-b.ll
index a05d19b..2ecdc42 100644
--- a/test/CodeGen/Mips/msa/3r-b.ll
+++ b/test/CodeGen/Mips/msa/3r-b.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_bclr_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bclr_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bclr_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES
ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.bclr.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_bclr_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bclr_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bclr_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES
ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.bclr.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_bclr_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bclr_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bclr_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES
ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_bclr_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bclr_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bclr_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES
ret void
@@ -99,9 +99,9 @@ declare <2 x i64> @llvm.mips.bclr.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_binsl_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_binsl_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_binsl_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_binsl_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.binsl.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
store <16 x i8> %3, <16 x i8>* @llvm_mips_binsl_b_RES
ret void
@@ -127,9 +127,9 @@ declare <16 x i8> @llvm.mips.binsl.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_binsl_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_binsl_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_binsl_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_binsl_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.binsl.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_binsl_h_RES
ret void
@@ -155,9 +155,9 @@ declare <8 x i16> @llvm.mips.binsl.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_binsl_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_binsl_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_binsl_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_binsl_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.binsl.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_binsl_w_RES
ret void
@@ -183,9 +183,9 @@ declare <4 x i32> @llvm.mips.binsl.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_binsl_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_binsl_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_binsl_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_binsl_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.binsl.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_binsl_d_RES
ret void
@@ -211,9 +211,9 @@ declare <2 x i64> @llvm.mips.binsl.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_binsr_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_binsr_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_binsr_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_binsr_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.binsr.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
store <16 x i8> %3, <16 x i8>* @llvm_mips_binsr_b_RES
ret void
@@ -239,9 +239,9 @@ declare <16 x i8> @llvm.mips.binsr.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_binsr_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_binsr_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_binsr_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_binsr_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.binsr.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_binsr_h_RES
ret void
@@ -267,9 +267,9 @@ declare <8 x i16> @llvm.mips.binsr.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_binsr_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_binsr_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_binsr_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_binsr_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.binsr.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_binsr_w_RES
ret void
@@ -295,9 +295,9 @@ declare <4 x i32> @llvm.mips.binsr.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_binsr_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_binsr_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_binsr_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_binsr_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.binsr.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_binsr_d_RES
ret void
@@ -322,8 +322,8 @@ declare <2 x i64> @llvm.mips.binsr.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_bneg_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bneg_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bneg_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES
ret void
@@ -344,8 +344,8 @@ declare <16 x i8> @llvm.mips.bneg.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_bneg_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bneg_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bneg_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES
ret void
@@ -366,8 +366,8 @@ declare <8 x i16> @llvm.mips.bneg.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_bneg_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bneg_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bneg_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES
ret void
@@ -388,8 +388,8 @@ declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_bneg_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bneg_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bneg_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES
ret void
@@ -410,8 +410,8 @@ declare <2 x i64> @llvm.mips.bneg.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_bset_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bset_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bset_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES
ret void
@@ -432,8 +432,8 @@ declare <16 x i8> @llvm.mips.bset.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_bset_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bset_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bset_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES
ret void
@@ -454,8 +454,8 @@ declare <8 x i16> @llvm.mips.bset.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_bset_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bset_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bset_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES
ret void
@@ -476,8 +476,8 @@ declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_bset_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bset_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bset_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES
ret void
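
Every hunk above and below applies the same mechanical rewrite: the result type of a load, previously inferred from its pointer operand, is now spelled out as an explicit first operand. A minimal sketch of the pattern, using a hypothetical global @g rather than one of the test globals:

-  %v = load <16 x i8>* @g            ; old form: type inferred from the pointer
+  %v = load <16 x i8>, <16 x i8>* @g ; new form: loaded type stated explicitly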
diff --git a/test/CodeGen/Mips/msa/3r-c.ll b/test/CodeGen/Mips/msa/3r-c.ll
index 6ec92c2..a3913e0 100644
--- a/test/CodeGen/Mips/msa/3r-c.ll
+++ b/test/CodeGen/Mips/msa/3r-c.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_ceq_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ceq_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ceq_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ceq.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_ceq_b_RES
ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.ceq.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_ceq_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ceq_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ceq_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ceq.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_ceq_h_RES
ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.ceq.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_ceq_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ceq_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ceq_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ceq.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_ceq_w_RES
ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.ceq.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_ceq_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ceq_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ceq_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ceq.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_ceq_d_RES
ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.ceq.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_cle_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_cle_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_cle_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.cle.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_s_b_RES
ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.cle.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_cle_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_cle_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_cle_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.cle.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_s_h_RES
ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.cle.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_cle_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_cle_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_cle_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.cle.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_s_w_RES
ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.cle.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_cle_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_cle_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_cle_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.cle.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_s_d_RES
ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.cle.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_cle_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_cle_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_cle_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.cle.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_u_b_RES
ret void
@@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.cle.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_cle_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_cle_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_cle_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.cle.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_u_h_RES
ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.cle.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_cle_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_cle_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_cle_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.cle.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_u_w_RES
ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.cle.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_cle_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_cle_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_cle_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.cle.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_u_d_RES
ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.cle.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_clt_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_clt_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_clt_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.clt.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_s_b_RES
ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.clt.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_clt_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_clt_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_clt_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.clt.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_s_h_RES
ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.clt.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_clt_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_clt_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_clt_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.clt.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_s_w_RES
ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.clt.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_clt_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_clt_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_clt_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.clt.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_s_d_RES
ret void
@@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.clt.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_clt_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_clt_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_clt_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.clt.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_u_b_RES
ret void
@@ -384,8 +384,8 @@ declare <16 x i8> @llvm.mips.clt.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_clt_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_clt_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_clt_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.clt.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_u_h_RES
ret void
@@ -406,8 +406,8 @@ declare <8 x i16> @llvm.mips.clt.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_clt_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_clt_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_clt_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.clt.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_u_w_RES
ret void
@@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.clt.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_clt_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_clt_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_clt_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.clt.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_u_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r-d.ll b/test/CodeGen/Mips/msa/3r-d.ll
index 0099554..4fc32b7 100644
--- a/test/CodeGen/Mips/msa/3r-d.ll
+++ b/test/CodeGen/Mips/msa/3r-d.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_div_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_div_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_div_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_div_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
ret void
@@ -95,8 +95,8 @@ declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>) nounwind
define void @div_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
%2 = sdiv <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
ret void
@@ -111,8 +111,8 @@ entry:
define void @div_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
%2 = sdiv <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
ret void
@@ -127,8 +127,8 @@ entry:
define void @div_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
%2 = sdiv <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
ret void
@@ -143,8 +143,8 @@ entry:
define void @div_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
%2 = sdiv <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
ret void
@@ -163,8 +163,8 @@ entry:
define void @llvm_mips_div_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
ret void
@@ -185,8 +185,8 @@ declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_div_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
ret void
@@ -207,8 +207,8 @@ declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_div_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
ret void
@@ -229,8 +229,8 @@ declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_div_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
ret void
@@ -248,8 +248,8 @@ declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>) nounwind
define void @div_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
%2 = udiv <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
ret void
@@ -264,8 +264,8 @@ entry:
define void @div_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
%2 = udiv <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
ret void
@@ -280,8 +280,8 @@ entry:
define void @div_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
%2 = udiv <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
ret void
@@ -296,8 +296,8 @@ entry:
define void @div_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
%2 = udiv <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
ret void
@@ -326,8 +326,8 @@ entry:
define void @llvm_mips_dotp_s_h_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.dotp.s.h(<16 x i8> %0, <16 x i8> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_s_h_RES
ret void
@@ -353,8 +353,8 @@ declare <8 x i16> @llvm.mips.dotp.s.h(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_dotp_s_w_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.dotp.s.w(<8 x i16> %0, <8 x i16> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_s_w_RES
ret void
@@ -377,8 +377,8 @@ declare <4 x i32> @llvm.mips.dotp.s.w(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_dotp_s_d_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.dotp.s.d(<4 x i32> %0, <4 x i32> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_s_d_RES
ret void
@@ -409,8 +409,8 @@ declare <2 x i64> @llvm.mips.dotp.s.d(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_dotp_u_h_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.dotp.u.h(<16 x i8> %0, <16 x i8> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_u_h_RES
ret void
@@ -436,8 +436,8 @@ declare <8 x i16> @llvm.mips.dotp.u.h(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_dotp_u_w_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.dotp.u.w(<8 x i16> %0, <8 x i16> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_u_w_RES
ret void
@@ -460,8 +460,8 @@ declare <4 x i32> @llvm.mips.dotp.u.w(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_dotp_u_d_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.dotp.u.d(<4 x i32> %0, <4 x i32> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_u_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r-i.ll b/test/CodeGen/Mips/msa/3r-i.ll
index 2ef3047..7147b75 100644
--- a/test/CodeGen/Mips/msa/3r-i.ll
+++ b/test/CodeGen/Mips/msa/3r-i.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_ilvev_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ilvev_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ilvev_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ilvev.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvev_b_RES
ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.ilvev.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_ilvev_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ilvev_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ilvev_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ilvev.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvev_h_RES
ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.ilvev.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_ilvev_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ilvev_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ilvev_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ilvev.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvev_w_RES
ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.ilvev.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_ilvev_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ilvev_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ilvev_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ilvev.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvev_d_RES
ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.ilvev.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_ilvl_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ilvl_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ilvl_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ilvl.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvl_b_RES
ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.ilvl.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_ilvl_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ilvl_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ilvl_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ilvl.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvl_h_RES
ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.ilvl.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_ilvl_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ilvl_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ilvl_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ilvl.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvl_w_RES
ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.ilvl.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_ilvl_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ilvl_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ilvl_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ilvl.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvl_d_RES
ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.ilvl.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_ilvod_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ilvod_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ilvod_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ilvod.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvod_b_RES
ret void
@@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.ilvod.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_ilvod_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ilvod_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ilvod_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ilvod.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvod_h_RES
ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.ilvod.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_ilvod_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ilvod_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ilvod_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ilvod.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvod_w_RES
ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.ilvod.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_ilvod_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ilvod_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ilvod_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ilvod.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvod_d_RES
ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.ilvod.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_ilvr_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ilvr_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_ilvr_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ilvr.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvr_b_RES
ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.ilvr.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_ilvr_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ilvr_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_ilvr_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ilvr.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvr_h_RES
ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.ilvr.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_ilvr_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ilvr_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_ilvr_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ilvr.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvr_w_RES
ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.ilvr.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_ilvr_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ilvr_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_ilvr_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ilvr.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvr_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r-m.ll b/test/CodeGen/Mips/msa/3r-m.ll
index ddfd720..39b4f7d 100644
--- a/test/CodeGen/Mips/msa/3r-m.ll
+++ b/test/CodeGen/Mips/msa/3r-m.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_max_a_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_max_a_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_max_a_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.max.a.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_max_a_b_RES
ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.max.a.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_max_a_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_max_a_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_max_a_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.max.a.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_max_a_h_RES
ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.max.a.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_max_a_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_max_a_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_max_a_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.max.a.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_max_a_w_RES
ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.max.a.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_max_a_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_max_a_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_max_a_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.max.a.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_max_a_d_RES
ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.max.a.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_max_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_max_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_max_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.max.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_max_s_b_RES
ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.max.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_max_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_max_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_max_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.max.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_max_s_h_RES
ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.max.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_max_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_max_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_max_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.max.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_max_s_w_RES
ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.max.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_max_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_max_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_max_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.max.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_max_s_d_RES
ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.max.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_max_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_max_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_max_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.max.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_max_u_b_RES
ret void
@@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.max.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_max_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_max_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_max_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.max.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_max_u_h_RES
ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.max.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_max_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_max_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_max_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.max.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_max_u_w_RES
ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.max.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_max_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_max_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_max_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.max.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_max_u_d_RES
ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.max.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_min_a_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_min_a_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_min_a_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.min.a.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_min_a_b_RES
ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.min.a.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_min_a_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_min_a_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_min_a_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.min.a.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_min_a_h_RES
ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.min.a.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_min_a_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_min_a_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_min_a_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.min.a.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_min_a_w_RES
ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.min.a.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_min_a_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_min_a_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_min_a_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.min.a.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_min_a_d_RES
ret void
@@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.min.a.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_min_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_min_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_min_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.min.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_min_s_b_RES
ret void
@@ -384,8 +384,8 @@ declare <16 x i8> @llvm.mips.min.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_min_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_min_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_min_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.min.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_min_s_h_RES
ret void
@@ -406,8 +406,8 @@ declare <8 x i16> @llvm.mips.min.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_min_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_min_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_min_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.min.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_min_s_w_RES
ret void
@@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.min.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_min_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_min_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_min_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.min.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_min_s_d_RES
ret void
@@ -450,8 +450,8 @@ declare <2 x i64> @llvm.mips.min.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_min_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_min_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_min_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.min.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_min_u_b_RES
ret void
@@ -472,8 +472,8 @@ declare <16 x i8> @llvm.mips.min.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_min_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_min_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_min_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.min.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_min_u_h_RES
ret void
@@ -494,8 +494,8 @@ declare <8 x i16> @llvm.mips.min.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_min_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_min_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_min_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.min.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_min_u_w_RES
ret void
@@ -516,8 +516,8 @@ declare <4 x i32> @llvm.mips.min.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_min_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_min_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_min_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.min.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_min_u_d_RES
ret void
@@ -538,8 +538,8 @@ declare <2 x i64> @llvm.mips.min.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_mod_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_mod_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_mod_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_s_b_RES
ret void
@@ -560,8 +560,8 @@ declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_mod_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_mod_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mod_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_s_h_RES
ret void
@@ -582,8 +582,8 @@ declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_mod_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_mod_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mod_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_s_w_RES
ret void
@@ -604,8 +604,8 @@ declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_mod_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_mod_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_mod_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
ret void
@@ -626,8 +626,8 @@ declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_mod_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_mod_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_mod_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_u_b_RES
ret void
@@ -648,8 +648,8 @@ declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_mod_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_mod_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mod_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_u_h_RES
ret void
@@ -670,8 +670,8 @@ declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_mod_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_mod_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mod_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_u_w_RES
ret void
@@ -692,8 +692,8 @@ declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_mod_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_mod_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_mod_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
ret void
@@ -714,8 +714,8 @@ declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_mulv_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
ret void
@@ -736,8 +736,8 @@ declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_mulv_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
ret void
@@ -758,8 +758,8 @@ declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_mulv_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
ret void
@@ -780,8 +780,8 @@ declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_mulv_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
ret void
@@ -798,8 +798,8 @@ declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>) nounwind
define void @mulv_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
%2 = mul <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
ret void
@@ -814,8 +814,8 @@ entry:
define void @mulv_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
%2 = mul <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
ret void
@@ -830,8 +830,8 @@ entry:
define void @mulv_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
%2 = mul <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
ret void
@@ -846,8 +846,8 @@ entry:
define void @mulv_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
%2 = mul <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r-p.ll b/test/CodeGen/Mips/msa/3r-p.ll
index 852023b..70b98aa 100644
--- a/test/CodeGen/Mips/msa/3r-p.ll
+++ b/test/CodeGen/Mips/msa/3r-p.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_pckev_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_pckev_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_pckev_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.pckev.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_pckev_b_RES
ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.pckev.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_pckev_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_pckev_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_pckev_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.pckev.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_pckev_h_RES
ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.pckev.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_pckev_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_pckev_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_pckev_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.pckev.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_pckev_w_RES
ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.pckev.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_pckev_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_pckev_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_pckev_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_pckev_d_RES
ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.pckev.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_pckod_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_pckod_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_pckod_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.pckod.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_pckod_b_RES
ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.pckod.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_pckod_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_pckod_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_pckod_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.pckod.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_pckod_h_RES
ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.pckod.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_pckod_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_pckod_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_pckod_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.pckod.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_pckod_w_RES
ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.pckod.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_pckod_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_pckod_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_pckod_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.pckod.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_pckod_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r-s.ll b/test/CodeGen/Mips/msa/3r-s.ll
index 581c3bf..d04c5ff 100644
--- a/test/CodeGen/Mips/msa/3r-s.ll
+++ b/test/CodeGen/Mips/msa/3r-s.ll
@@ -11,9 +11,9 @@
define void @llvm_mips_sld_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sld_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sld_b_ARG2
- %2 = load i32* @llvm_mips_sld_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG2
+ %2 = load i32, i32* @llvm_mips_sld_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, <16 x i8> %1, i32 %2)
store <16 x i8> %3, <16 x i8>* @llvm_mips_sld_b_RES
ret void
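
The sld tests also read a scalar shift amount, so the same explicit-type rewrite applies to non-vector loads. A sketch with a hypothetical global @amt:

-  %s = load i32* @amt        ; old form
+  %s = load i32, i32* @amt   ; new form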
@@ -39,9 +39,9 @@ declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, <16 x i8>, i32) nounwind
define void @llvm_mips_sld_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sld_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sld_h_ARG2
- %2 = load i32* @llvm_mips_sld_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG2
+ %2 = load i32, i32* @llvm_mips_sld_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, <8 x i16> %1, i32 %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_sld_h_RES
ret void
@@ -67,9 +67,9 @@ declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, <8 x i16>, i32) nounwind
define void @llvm_mips_sld_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sld_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sld_w_ARG2
- %2 = load i32* @llvm_mips_sld_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG2
+ %2 = load i32, i32* @llvm_mips_sld_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, <4 x i32> %1, i32 %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_sld_w_RES
ret void
@@ -95,9 +95,9 @@ declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, <4 x i32>, i32) nounwind
define void @llvm_mips_sld_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sld_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sld_d_ARG2
- %2 = load i32* @llvm_mips_sld_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG2
+ %2 = load i32, i32* @llvm_mips_sld_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, <2 x i64> %1, i32 %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_sld_d_RES
ret void
@@ -122,8 +122,8 @@ declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, <2 x i64>, i32) nounwind
define void @llvm_mips_sll_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
ret void
@@ -146,8 +146,8 @@ declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_sll_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
ret void
@@ -170,8 +170,8 @@ declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_sll_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
ret void
@@ -194,8 +194,8 @@ declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_sll_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
ret void
@@ -214,8 +214,8 @@ declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind
define void @sll_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
%2 = shl <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
ret void
@@ -232,8 +232,8 @@ entry:
define void @sll_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
%2 = shl <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
ret void
@@ -250,8 +250,8 @@ entry:
define void @sll_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
%2 = shl <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
ret void
@@ -268,8 +268,8 @@ entry:
define void @sll_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
%2 = shl <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
ret void
@@ -290,8 +290,8 @@ entry:
define void @llvm_mips_sra_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
ret void
@@ -314,8 +314,8 @@ declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_sra_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
ret void
@@ -338,8 +338,8 @@ declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_sra_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
ret void
@@ -362,8 +362,8 @@ declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_sra_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
ret void
@@ -383,8 +383,8 @@ declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind
define void @sra_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
%2 = ashr <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
ret void
@@ -401,8 +401,8 @@ entry:
define void @sra_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
%2 = ashr <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
ret void
@@ -419,8 +419,8 @@ entry:
define void @sra_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
%2 = ashr <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
ret void
@@ -437,8 +437,8 @@ entry:
define void @sra_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
%2 = ashr <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
ret void
@@ -459,8 +459,8 @@ entry:
define void @llvm_mips_srar_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srar_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_srar_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.srar.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_srar_b_RES
ret void
@@ -483,8 +483,8 @@ declare <16 x i8> @llvm.mips.srar.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_srar_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srar_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_srar_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.srar.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_srar_h_RES
ret void
@@ -507,8 +507,8 @@ declare <8 x i16> @llvm.mips.srar.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_srar_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srar_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_srar_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.srar.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_srar_w_RES
ret void
@@ -531,8 +531,8 @@ declare <4 x i32> @llvm.mips.srar.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_srar_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srar_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_srar_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.srar.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_srar_d_RES
ret void
@@ -555,8 +555,8 @@ declare <2 x i64> @llvm.mips.srar.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_srl_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
ret void
@@ -579,8 +579,8 @@ declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_srl_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
ret void
@@ -603,8 +603,8 @@ declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_srl_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
ret void
@@ -627,8 +627,8 @@ declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_srl_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
ret void
@@ -651,8 +651,8 @@ declare <2 x i64> @llvm.mips.srl.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_srlr_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srlr_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_srlr_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.srlr.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_srlr_b_RES
ret void
@@ -675,8 +675,8 @@ declare <16 x i8> @llvm.mips.srlr.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_srlr_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srlr_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_srlr_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.srlr.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_srlr_h_RES
ret void
@@ -699,8 +699,8 @@ declare <8 x i16> @llvm.mips.srlr.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_srlr_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srlr_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_srlr_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.srlr.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_srlr_w_RES
ret void
@@ -723,8 +723,8 @@ declare <4 x i32> @llvm.mips.srlr.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_srlr_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srlr_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_srlr_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.srlr.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_srlr_d_RES
ret void
@@ -744,8 +744,8 @@ declare <2 x i64> @llvm.mips.srlr.d(<2 x i64>, <2 x i64>) nounwind
define void @srl_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
%2 = lshr <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
ret void
@@ -762,8 +762,8 @@ entry:
define void @srl_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
%2 = lshr <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
ret void
@@ -780,8 +780,8 @@ entry:
define void @srl_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
%2 = lshr <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
ret void
@@ -798,8 +798,8 @@ entry:
define void @srl_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
%2 = lshr <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
ret void
@@ -820,8 +820,8 @@ entry:
define void @llvm_mips_subs_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_subs_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_subs_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subs.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_s_b_RES
ret void
@@ -844,8 +844,8 @@ declare <16 x i8> @llvm.mips.subs.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_subs_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_subs_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_subs_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subs.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_s_h_RES
ret void
@@ -868,8 +868,8 @@ declare <8 x i16> @llvm.mips.subs.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_subs_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_subs_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_subs_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subs.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_s_w_RES
ret void
@@ -892,8 +892,8 @@ declare <4 x i32> @llvm.mips.subs.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_subs_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_subs_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_subs_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subs.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_s_d_RES
ret void
@@ -916,8 +916,8 @@ declare <2 x i64> @llvm.mips.subs.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_subs_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_subs_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_subs_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subs.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_u_b_RES
ret void
@@ -940,8 +940,8 @@ declare <16 x i8> @llvm.mips.subs.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_subs_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_subs_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_subs_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subs.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_u_h_RES
ret void
@@ -964,8 +964,8 @@ declare <8 x i16> @llvm.mips.subs.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_subs_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_subs_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_subs_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subs.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_u_w_RES
ret void
@@ -988,8 +988,8 @@ declare <4 x i32> @llvm.mips.subs.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_subs_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_subs_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_subs_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subs.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_u_d_RES
ret void
@@ -1012,8 +1012,8 @@ declare <2 x i64> @llvm.mips.subs.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_subsus_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subsus.u.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_subsus_u_b_RES
ret void
@@ -1036,8 +1036,8 @@ declare <16 x i8> @llvm.mips.subsus.u.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_subsus_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subsus.u.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_subsus_u_h_RES
ret void
@@ -1060,8 +1060,8 @@ declare <8 x i16> @llvm.mips.subsus.u.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_subsus_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subsus.u.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_subsus_u_w_RES
ret void
@@ -1084,8 +1084,8 @@ declare <4 x i32> @llvm.mips.subsus.u.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_subsus_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subsus.u.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_subsus_u_d_RES
ret void
@@ -1108,8 +1108,8 @@ declare <2 x i64> @llvm.mips.subsus.u.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_subsuu_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_subsuu_s_b_RES
ret void
@@ -1132,8 +1132,8 @@ declare <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_subsuu_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_subsuu_s_h_RES
ret void
@@ -1156,8 +1156,8 @@ declare <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_subsuu_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_subsuu_s_w_RES
ret void
@@ -1180,8 +1180,8 @@ declare <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_subsuu_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_subsuu_s_d_RES
ret void
@@ -1204,8 +1204,8 @@ declare <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_subv_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
ret void
@@ -1228,8 +1228,8 @@ declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_subv_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
ret void
@@ -1252,8 +1252,8 @@ declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_subv_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
ret void
@@ -1276,8 +1276,8 @@ declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_subv_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
ret void
@@ -1297,8 +1297,8 @@ declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>) nounwind
define void @subv_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
%2 = sub <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
ret void
@@ -1315,8 +1315,8 @@ entry:
define void @subv_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
%2 = sub <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
ret void
@@ -1333,8 +1333,8 @@ entry:
define void @subv_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
%2 = sub <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
ret void
@@ -1351,8 +1351,8 @@ entry:
define void @subv_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
%2 = sub <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
ret void
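
Every hunk in this diff is the same mechanical rewrite: the LLVM IR `load` instruction now states its result type explicitly instead of deriving it from the pointer operand's pointee type, part of the broader move toward pointers that no longer carry a pointee type. A minimal sketch of the old and new forms, using a hypothetical global @g that is not taken from these tests:

    @g = global <16 x i8> zeroinitializer

    define <16 x i8> @load_example() {
    entry:
      ; Old form (pre-change):  %v = load <16 x i8>* @g
      ; New form: the loaded type comes first, then the (still typed) pointer.
      %v = load <16 x i8>, <16 x i8>* @g
      ret <16 x i8> %v
    }
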
diff --git a/test/CodeGen/Mips/msa/3r-v.ll b/test/CodeGen/Mips/msa/3r-v.ll
index c9693f9..2d36da4 100644
--- a/test/CodeGen/Mips/msa/3r-v.ll
+++ b/test/CodeGen/Mips/msa/3r-v.ll
@@ -11,9 +11,9 @@
define void @llvm_mips_vshf_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_vshf_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_vshf_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_vshf_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES
ret void
@@ -36,9 +36,9 @@ declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_vshf_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_vshf_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_vshf_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_vshf_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES
ret void
@@ -61,9 +61,9 @@ declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_vshf_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_vshf_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_vshf_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_vshf_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES
ret void
@@ -86,9 +86,9 @@ declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_vshf_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_vshf_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_vshf_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_vshf_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r_4r.ll b/test/CodeGen/Mips/msa/3r_4r.ll
index b7fd728..73d104c 100644
--- a/test/CodeGen/Mips/msa/3r_4r.ll
+++ b/test/CodeGen/Mips/msa/3r_4r.ll
@@ -11,9 +11,9 @@
define void @llvm_mips_maddv_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_maddv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_maddv_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_maddv_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.maddv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
store <16 x i8> %3, <16 x i8>* @llvm_mips_maddv_b_RES
ret void
@@ -36,9 +36,9 @@ declare <16 x i8> @llvm.mips.maddv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_maddv_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_maddv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_maddv_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_maddv_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.maddv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_maddv_h_RES
ret void
@@ -61,9 +61,9 @@ declare <8 x i16> @llvm.mips.maddv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_maddv_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_maddv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_maddv_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_maddv_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.maddv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_maddv_w_RES
ret void
@@ -86,9 +86,9 @@ declare <4 x i32> @llvm.mips.maddv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_maddv_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_maddv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_maddv_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_maddv_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.maddv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_maddv_d_RES
ret void
@@ -111,9 +111,9 @@ declare <2 x i64> @llvm.mips.maddv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
define void @llvm_mips_msubv_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_msubv_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_msubv_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_msubv_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.msubv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
store <16 x i8> %3, <16 x i8>* @llvm_mips_msubv_b_RES
ret void
@@ -136,9 +136,9 @@ declare <16 x i8> @llvm.mips.msubv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_msubv_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_msubv_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_msubv_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_msubv_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.msubv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_msubv_h_RES
ret void
@@ -161,9 +161,9 @@ declare <8 x i16> @llvm.mips.msubv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_msubv_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_msubv_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_msubv_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_msubv_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.msubv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_msubv_w_RES
ret void
@@ -186,9 +186,9 @@ declare <4 x i32> @llvm.mips.msubv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_msubv_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_msubv_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_msubv_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_msubv_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.msubv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_msubv_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3r_4r_widen.ll b/test/CodeGen/Mips/msa/3r_4r_widen.ll
index 7063e45..fe248ee 100644
--- a/test/CodeGen/Mips/msa/3r_4r_widen.ll
+++ b/test/CodeGen/Mips/msa/3r_4r_widen.ll
@@ -12,9 +12,9 @@
define void @llvm_mips_dpadd_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
- %2 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_s_h_RES
ret void
@@ -37,9 +37,9 @@ declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_dpadd_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
- %2 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_s_w_RES
ret void
@@ -62,9 +62,9 @@ declare <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_dpadd_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
- %2 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_s_d_RES
ret void
@@ -87,9 +87,9 @@ declare <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_dpadd_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
- %2 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_u_h_RES
ret void
@@ -112,9 +112,9 @@ declare <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_dpadd_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
- %2 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_u_w_RES
ret void
@@ -137,9 +137,9 @@ declare <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_dpadd_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
- %2 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_u_d_RES
ret void
@@ -162,9 +162,9 @@ declare <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_dpsub_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
- %2 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_s_h_RES
ret void
@@ -187,9 +187,9 @@ declare <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_dpsub_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
- %2 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_s_w_RES
ret void
@@ -212,9 +212,9 @@ declare <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_dpsub_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
- %2 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_s_d_RES
ret void
@@ -237,9 +237,9 @@ declare <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_dpsub_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
- %1 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
- %2 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_u_h_RES
ret void
@@ -262,9 +262,9 @@ declare <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
define void @llvm_mips_dpsub_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
- %1 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
- %2 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_u_w_RES
ret void
@@ -287,9 +287,9 @@ declare <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_dpsub_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
- %1 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
- %2 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_u_d_RES
ret void
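
In the widening dot-product tests above, the three operands have different vector types, so each rewritten load states its own element type: the accumulator is loaded at the wide type and the two multiplicands at the narrow type. A sketch in the same shape as these tests, with hypothetical globals @acc, @x, and @y; the intrinsic signature is the one declared in this diff:

    @acc = global <8 x i16> zeroinitializer
    @x = global <16 x i8> zeroinitializer
    @y = global <16 x i8> zeroinitializer

    define <8 x i16> @dpadd_example() {
    entry:
      ; Each load names the type actually read, matching its pointer's pointee.
      %0 = load <8 x i16>, <8 x i16>* @acc
      %1 = load <16 x i8>, <16 x i8>* @x
      %2 = load <16 x i8>, <16 x i8>* @y
      %3 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
      ret <8 x i16> %3
    }

    declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
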
diff --git a/test/CodeGen/Mips/msa/3r_splat.ll b/test/CodeGen/Mips/msa/3r_splat.ll
index 6b0cb26..56d26b0 100644
--- a/test/CodeGen/Mips/msa/3r_splat.ll
+++ b/test/CodeGen/Mips/msa/3r_splat.ll
@@ -11,7 +11,7 @@
define void @llvm_mips_splat_b_test(i32 %a) nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_splat_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splat_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.splat.b(<16 x i8> %0, i32 %a)
store <16 x i8> %1, <16 x i8>* @llvm_mips_splat_b_RES
ret void
@@ -32,7 +32,7 @@ declare <16 x i8> @llvm.mips.splat.b(<16 x i8>, i32) nounwind
define void @llvm_mips_splat_h_test(i32 %a) nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_splat_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splat_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.splat.h(<8 x i16> %0, i32 %a)
store <8 x i16> %1, <8 x i16>* @llvm_mips_splat_h_RES
ret void
@@ -53,7 +53,7 @@ declare <8 x i16> @llvm.mips.splat.h(<8 x i16>, i32) nounwind
define void @llvm_mips_splat_w_test(i32 %a) nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_splat_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splat_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.splat.w(<4 x i32> %0, i32 %a)
store <4 x i32> %1, <4 x i32>* @llvm_mips_splat_w_RES
ret void
@@ -74,7 +74,7 @@ declare <4 x i32> @llvm.mips.splat.w(<4 x i32>, i32) nounwind
define void @llvm_mips_splat_d_test(i32 %a) nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_splat_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splat_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> %0, i32 %a)
store <2 x i64> %1, <2 x i64>* @llvm_mips_splat_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3rf.ll b/test/CodeGen/Mips/msa/3rf.ll
index ae665af..dce0c27 100644
--- a/test/CodeGen/Mips/msa/3rf.ll
+++ b/test/CodeGen/Mips/msa/3rf.ll
@@ -9,8 +9,8 @@
define void @llvm_mips_fadd_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
ret void
@@ -31,8 +31,8 @@ declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fadd_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
ret void
@@ -49,8 +49,8 @@ declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>) nounwind
define void @fadd_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
%2 = fadd <4 x float> %0, %1
store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
ret void
@@ -65,8 +65,8 @@ entry:
define void @fadd_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
%2 = fadd <2 x double> %0, %1
store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
ret void
@@ -85,8 +85,8 @@ entry:
define void @llvm_mips_fdiv_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
ret void
@@ -107,8 +107,8 @@ declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fdiv_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
ret void
@@ -125,8 +125,8 @@ declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>) nounwind
define void @fdiv_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
%2 = fdiv <4 x float> %0, %1
store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
ret void
@@ -141,8 +141,8 @@ entry:
define void @fdiv_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
%2 = fdiv <2 x double> %0, %1
store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
ret void
@@ -161,8 +161,8 @@ entry:
define void @llvm_mips_fmin_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fmin_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmin_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmin_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmin_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %0, <4 x float> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fmin_w_RES
ret void
@@ -183,8 +183,8 @@ declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fmin_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fmin_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fmin_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fmin_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fmin_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %0, <2 x double> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fmin_d_RES
ret void
@@ -205,8 +205,8 @@ declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fmin_a_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fmin_a_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmin_a_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmin_a_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmin_a_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fmin.a.w(<4 x float> %0, <4 x float> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fmin_a_w_RES
ret void
@@ -227,8 +227,8 @@ declare <4 x float> @llvm.mips.fmin.a.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fmin_a_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fmin_a_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fmin_a_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fmin_a_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fmin_a_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fmin.a.d(<2 x double> %0, <2 x double> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fmin_a_d_RES
ret void
@@ -249,8 +249,8 @@ declare <2 x double> @llvm.mips.fmin.a.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fmax_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fmax_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmax_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmax_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmax_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %0, <4 x float> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fmax_w_RES
ret void
@@ -271,8 +271,8 @@ declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fmax_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fmax_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fmax_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fmax_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fmax_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %0, <2 x double> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fmax_d_RES
ret void
@@ -293,8 +293,8 @@ declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fmax_a_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fmax_a_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmax_a_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmax_a_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmax_a_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fmax_a_w_RES
ret void
@@ -315,8 +315,8 @@ declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fmax_a_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fmax_a_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fmax_a_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fmax_a_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fmax_a_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fmax_a_d_RES
ret void
@@ -337,8 +337,8 @@ declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fmul_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES
ret void
@@ -359,8 +359,8 @@ declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fmul_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES
ret void
@@ -377,8 +377,8 @@ declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>) nounwind
define void @fmul_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG2
%2 = fmul <4 x float> %0, %1
store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES
ret void
@@ -393,8 +393,8 @@ entry:
define void @fmul_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG2
%2 = fmul <2 x double> %0, %1
store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES
ret void
@@ -413,8 +413,8 @@ entry:
define void @llvm_mips_fsub_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES
ret void
@@ -435,8 +435,8 @@ declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsub_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES
ret void
@@ -454,8 +454,8 @@ declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>) nounwind
define void @fsub_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG2
%2 = fsub <4 x float> %0, %1
store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES
ret void
@@ -470,8 +470,8 @@ entry:
define void @fsub_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG2
%2 = fsub <2 x double> %0, %1
store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES
ret void
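
The same rewrite applies unchanged to the floating-point vector tests: only the load syntax moves; the fadd/fdiv/fmin/fmax/fmul/fsub bodies are untouched. Note that the stated result type must agree with the pointer's pointee type, or the IR parser rejects the instruction. A small sketch with a hypothetical global @d:

    @d = global <2 x double> zeroinitializer

    define <2 x double> @fp_load_example() {
    entry:
      %v = load <2 x double>, <2 x double>* @d    ; accepted: stated type matches the pointee
      ; %bad = load <4 x float>, <2 x double>* @d ; rejected: explicit type mismatch
      %sum = fadd <2 x double> %v, %v
      ret <2 x double> %sum
    }
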
diff --git a/test/CodeGen/Mips/msa/3rf_4rf.ll b/test/CodeGen/Mips/msa/3rf_4rf.ll
index 67ef7fd..f1a3002 100644
--- a/test/CodeGen/Mips/msa/3rf_4rf.ll
+++ b/test/CodeGen/Mips/msa/3rf_4rf.ll
@@ -11,9 +11,9 @@
define void @llvm_mips_fmadd_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fmadd_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmadd_w_ARG2
- %2 = load <4 x float>* @llvm_mips_fmadd_w_ARG3
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG2
+ %2 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG3
%3 = tail call <4 x float> @llvm.mips.fmadd.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
store <4 x float> %3, <4 x float>* @llvm_mips_fmadd_w_RES
ret void
@@ -36,9 +36,9 @@ declare <4 x float> @llvm.mips.fmadd.w(<4 x float>, <4 x float>, <4 x float>) no
define void @llvm_mips_fmadd_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fmadd_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fmadd_d_ARG2
- %2 = load <2 x double>* @llvm_mips_fmadd_d_ARG3
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG2
+ %2 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG3
%3 = tail call <2 x double> @llvm.mips.fmadd.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
store <2 x double> %3, <2 x double>* @llvm_mips_fmadd_d_RES
ret void
@@ -61,9 +61,9 @@ declare <2 x double> @llvm.mips.fmadd.d(<2 x double>, <2 x double>, <2 x double>
define void @llvm_mips_fmsub_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fmsub_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fmsub_w_ARG2
- %2 = load <4 x float>* @llvm_mips_fmsub_w_ARG3
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG2
+ %2 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG3
%3 = tail call <4 x float> @llvm.mips.fmsub.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
store <4 x float> %3, <4 x float>* @llvm_mips_fmsub_w_RES
ret void
@@ -86,9 +86,9 @@ declare <4 x float> @llvm.mips.fmsub.w(<4 x float>, <4 x float>, <4 x float>) no
define void @llvm_mips_fmsub_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fmsub_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fmsub_d_ARG2
- %2 = load <2 x double>* @llvm_mips_fmsub_d_ARG3
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG2
+ %2 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG3
%3 = tail call <2 x double> @llvm.mips.fmsub.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
store <2 x double> %3, <2 x double>* @llvm_mips_fmsub_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3rf_4rf_q.ll b/test/CodeGen/Mips/msa/3rf_4rf_q.ll
index de28be0..704c4b7 100644
--- a/test/CodeGen/Mips/msa/3rf_4rf_q.ll
+++ b/test/CodeGen/Mips/msa/3rf_4rf_q.ll
@@ -11,9 +11,9 @@
define void @llvm_mips_madd_q_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_madd_q_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_madd_q_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_madd_q_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES
ret void
@@ -36,9 +36,9 @@ declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_madd_q_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_madd_q_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_madd_q_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_madd_q_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_madd_q_w_RES
ret void
@@ -61,9 +61,9 @@ declare <4 x i32> @llvm.mips.madd.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_maddr_q_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_maddr_q_h_RES
ret void
@@ -86,9 +86,9 @@ declare <8 x i16> @llvm.mips.maddr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_maddr_q_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_maddr_q_w_RES
ret void
@@ -111,9 +111,9 @@ declare <4 x i32> @llvm.mips.maddr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_msub_q_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_msub_q_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_msub_q_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_msub_q_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_msub_q_h_RES
ret void
@@ -136,9 +136,9 @@ declare <8 x i16> @llvm.mips.msub.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_msub_q_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_msub_q_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_msub_q_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_msub_q_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_msub_q_w_RES
ret void
@@ -161,9 +161,9 @@ declare <4 x i32> @llvm.mips.msub.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_msubr_q_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_msubr_q_h_RES
ret void
@@ -186,9 +186,9 @@ declare <8 x i16> @llvm.mips.msubr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_msubr_q_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_msubr_q_w_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3rf_exdo.ll b/test/CodeGen/Mips/msa/3rf_exdo.ll
index 8a7f268..1b1b2e9 100644
--- a/test/CodeGen/Mips/msa/3rf_exdo.ll
+++ b/test/CodeGen/Mips/msa/3rf_exdo.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_fexdo_h_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fexdo_h_ARG1
- %1 = load <4 x float>* @llvm_mips_fexdo_h_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fexdo_h_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fexdo_h_ARG2
%2 = tail call <8 x half> @llvm.mips.fexdo.h(<4 x float> %0, <4 x float> %1)
store <8 x half> %2, <8 x half>* @llvm_mips_fexdo_h_RES
ret void
@@ -32,8 +32,8 @@ declare <8 x half> @llvm.mips.fexdo.h(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fexdo_w_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fexdo_w_ARG1
- %1 = load <2 x double>* @llvm_mips_fexdo_w_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fexdo_w_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fexdo_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fexdo.w(<2 x double> %0, <2 x double> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fexdo_w_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3rf_float_int.ll b/test/CodeGen/Mips/msa/3rf_float_int.ll
index 7b01e17..2bd056d 100644
--- a/test/CodeGen/Mips/msa/3rf_float_int.ll
+++ b/test/CodeGen/Mips/msa/3rf_float_int.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_fexp2_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fexp2_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_fexp2_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fexp2_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_fexp2_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fexp2.w(<4 x float> %0, <4 x i32> %1)
store <4 x float> %2, <4 x float>* @llvm_mips_fexp2_w_RES
ret void
@@ -32,8 +32,8 @@ declare <4 x float> @llvm.mips.fexp2.w(<4 x float>, <4 x i32>) nounwind
define void @llvm_mips_fexp2_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fexp2_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_fexp2_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fexp2_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_fexp2_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fexp2.d(<2 x double> %0, <2 x i64> %1)
store <2 x double> %2, <2 x double>* @llvm_mips_fexp2_d_RES
ret void
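
Where a test loads vectors of two different element types, as the fexp2 tests above do, each load repeats its own result type. A self-contained sketch of the updated shape, with hypothetical globals @f and @i standing in for the test's ARG globals:

@f = global <4 x float> zeroinitializer
@i = global <4 x i32> zeroinitializer

declare <4 x float> @llvm.mips.fexp2.w(<4 x float>, <4 x i32>) nounwind

define <4 x float> @sketch() nounwind {
  %a = load <4 x float>, <4 x float>* @f   ; float vector: type repeats <4 x float>
  %b = load <4 x i32>, <4 x i32>* @i       ; int vector: type repeats <4 x i32>
  %r = tail call <4 x float> @llvm.mips.fexp2.w(<4 x float> %a, <4 x i32> %b)
  ret <4 x float> %r
}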
diff --git a/test/CodeGen/Mips/msa/3rf_int_float.ll b/test/CodeGen/Mips/msa/3rf_int_float.ll
index 5624771..545e543 100644
--- a/test/CodeGen/Mips/msa/3rf_int_float.ll
+++ b/test/CodeGen/Mips/msa/3rf_int_float.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_fcaf_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcaf_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcaf_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcaf_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcaf_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcaf.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcaf_w_RES
ret void
@@ -32,8 +32,8 @@ declare <4 x i32> @llvm.mips.fcaf.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcaf_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcaf_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcaf_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcaf_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcaf_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcaf.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcaf_d_RES
ret void
@@ -54,8 +54,8 @@ declare <2 x i64> @llvm.mips.fcaf.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fceq_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fceq_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fceq_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fceq_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fceq_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fceq.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fceq_w_RES
ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.fceq.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fceq_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fceq_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fceq_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fceq_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fceq_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fceq.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fceq_d_RES
ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.fceq.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fcle_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcle_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcle_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcle_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcle_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcle.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcle_w_RES
ret void
@@ -120,8 +120,8 @@ declare <4 x i32> @llvm.mips.fcle.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcle_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcle_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcle_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcle_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcle_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcle.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcle_d_RES
ret void
@@ -142,8 +142,8 @@ declare <2 x i64> @llvm.mips.fcle.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fclt_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fclt_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fclt_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fclt_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fclt_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fclt.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fclt_w_RES
ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.fclt.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fclt_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fclt_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fclt_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fclt_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fclt_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fclt.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fclt_d_RES
ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.fclt.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fcor_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcor_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcor_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcor_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcor_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcor.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcor_w_RES
ret void
@@ -208,8 +208,8 @@ declare <4 x i32> @llvm.mips.fcor.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcor_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcor_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcor_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcor_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcor_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcor.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcor_d_RES
ret void
@@ -230,8 +230,8 @@ declare <2 x i64> @llvm.mips.fcor.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fcne_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcne_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcne_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcne_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcne_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcne.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcne_w_RES
ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.fcne.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcne_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcne_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcne_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcne_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcne_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcne.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcne_d_RES
ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.fcne.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fcueq_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcueq_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcueq_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcueq_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcueq_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcueq.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcueq_w_RES
ret void
@@ -296,8 +296,8 @@ declare <4 x i32> @llvm.mips.fcueq.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcueq_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcueq_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcueq_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcueq_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcueq_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcueq.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcueq_d_RES
ret void
@@ -318,8 +318,8 @@ declare <2 x i64> @llvm.mips.fcueq.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fcult_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcult_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcult_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcult_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcult_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcult.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcult_w_RES
ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.fcult.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcult_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcult_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcult_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcult_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcult_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcult.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcult_d_RES
ret void
@@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.fcult.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fcule_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcule_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcule_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcule_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcule_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcule.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcule_w_RES
ret void
@@ -384,8 +384,8 @@ declare <4 x i32> @llvm.mips.fcule.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcule_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcule_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcule_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcule_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcule_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcule.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcule_d_RES
ret void
@@ -406,8 +406,8 @@ declare <2 x i64> @llvm.mips.fcule.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fcun_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcun_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcun_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcun_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcun_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcun.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcun_w_RES
ret void
@@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.fcun.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcun_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcun_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcun_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcun_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcun_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcun.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcun_d_RES
ret void
@@ -450,8 +450,8 @@ declare <2 x i64> @llvm.mips.fcun.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fcune_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fcune_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fcune_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fcune_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fcune_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fcune.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fcune_w_RES
ret void
@@ -472,8 +472,8 @@ declare <4 x i32> @llvm.mips.fcune.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fcune_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fcune_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fcune_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fcune_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fcune_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fcune.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fcune_d_RES
ret void
@@ -494,8 +494,8 @@ declare <2 x i64> @llvm.mips.fcune.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsaf_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsaf_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsaf_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsaf_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsaf_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsaf.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsaf_w_RES
ret void
@@ -516,8 +516,8 @@ declare <4 x i32> @llvm.mips.fsaf.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsaf_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsaf_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsaf_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsaf_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsaf_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsaf.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsaf_d_RES
ret void
@@ -538,8 +538,8 @@ declare <2 x i64> @llvm.mips.fsaf.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fseq_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fseq_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fseq_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fseq_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fseq_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fseq.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fseq_w_RES
ret void
@@ -560,8 +560,8 @@ declare <4 x i32> @llvm.mips.fseq.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fseq_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fseq_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fseq_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fseq_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fseq_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fseq.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fseq_d_RES
ret void
@@ -582,8 +582,8 @@ declare <2 x i64> @llvm.mips.fseq.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsle_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsle_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsle_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsle_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsle_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsle.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsle_w_RES
ret void
@@ -604,8 +604,8 @@ declare <4 x i32> @llvm.mips.fsle.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsle_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsle_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsle_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsle_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsle_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsle.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsle_d_RES
ret void
@@ -626,8 +626,8 @@ declare <2 x i64> @llvm.mips.fsle.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fslt_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fslt_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fslt_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fslt_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fslt_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fslt.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fslt_w_RES
ret void
@@ -648,8 +648,8 @@ declare <4 x i32> @llvm.mips.fslt.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fslt_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fslt_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fslt_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fslt_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fslt_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fslt.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fslt_d_RES
ret void
@@ -670,8 +670,8 @@ declare <2 x i64> @llvm.mips.fslt.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsor_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsor_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsor_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsor_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsor_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsor.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsor_w_RES
ret void
@@ -692,8 +692,8 @@ declare <4 x i32> @llvm.mips.fsor.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsor_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsor_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsor_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsor_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsor_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsor.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsor_d_RES
ret void
@@ -714,8 +714,8 @@ declare <2 x i64> @llvm.mips.fsor.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsne_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsne_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsne_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsne_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsne_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsne.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsne_w_RES
ret void
@@ -736,8 +736,8 @@ declare <4 x i32> @llvm.mips.fsne.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsne_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsne_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsne_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsne_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsne_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsne.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsne_d_RES
ret void
@@ -758,8 +758,8 @@ declare <2 x i64> @llvm.mips.fsne.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsueq_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsueq_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsueq_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsueq_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsueq_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsueq.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsueq_w_RES
ret void
@@ -780,8 +780,8 @@ declare <4 x i32> @llvm.mips.fsueq.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsueq_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsueq_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsueq_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsueq_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsueq_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsueq.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsueq_d_RES
ret void
@@ -802,8 +802,8 @@ declare <2 x i64> @llvm.mips.fsueq.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsult_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsult_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsult_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsult_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsult_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsult.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsult_w_RES
ret void
@@ -824,8 +824,8 @@ declare <4 x i32> @llvm.mips.fsult.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsult_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsult_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsult_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsult.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsult_d_RES
ret void
@@ -846,8 +846,8 @@ declare <2 x i64> @llvm.mips.fsult.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsule_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsule_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsule_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsule.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsule_w_RES
ret void
@@ -868,8 +868,8 @@ declare <4 x i32> @llvm.mips.fsule.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsule_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsule_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsule_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsule.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsule_d_RES
ret void
@@ -890,8 +890,8 @@ declare <2 x i64> @llvm.mips.fsule.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsun_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsun_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsun_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsun.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsun_w_RES
ret void
@@ -912,8 +912,8 @@ declare <4 x i32> @llvm.mips.fsun.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsun_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsun_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsun_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsun.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsun_d_RES
ret void
@@ -934,8 +934,8 @@ declare <2 x i64> @llvm.mips.fsun.d(<2 x double>, <2 x double>) nounwind
define void @llvm_mips_fsune_w_test() nounwind {
entry:
- %0 = load <4 x float>* @llvm_mips_fsune_w_ARG1
- %1 = load <4 x float>* @llvm_mips_fsune_w_ARG2
+ %0 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG1
+ %1 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsune.w(<4 x float> %0, <4 x float> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_fsune_w_RES
ret void
@@ -956,8 +956,8 @@ declare <4 x i32> @llvm.mips.fsune.w(<4 x float>, <4 x float>) nounwind
define void @llvm_mips_fsune_d_test() nounwind {
entry:
- %0 = load <2 x double>* @llvm_mips_fsune_d_ARG1
- %1 = load <2 x double>* @llvm_mips_fsune_d_ARG2
+ %0 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG1
+ %1 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsune.d(<2 x double> %0, <2 x double> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_fsune_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/3rf_q.ll b/test/CodeGen/Mips/msa/3rf_q.ll
index f7000ee..c8b0a50 100644
--- a/test/CodeGen/Mips/msa/3rf_q.ll
+++ b/test/CodeGen/Mips/msa/3rf_q.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_mul_q_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_mul_q_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mul_q_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mul.q.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_mul_q_h_RES
ret void
@@ -32,8 +32,8 @@ declare <8 x i16> @llvm.mips.mul.q.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_mul_q_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_mul_q_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mul_q_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mul.q.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_mul_q_w_RES
ret void
@@ -54,8 +54,8 @@ declare <4 x i32> @llvm.mips.mul.q.w(<4 x i32>, <4 x i32>) nounwind
define void @llvm_mips_mulr_q_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mulr.q.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_mulr_q_h_RES
ret void
@@ -76,8 +76,8 @@ declare <8 x i16> @llvm.mips.mulr.q.h(<8 x i16>, <8 x i16>) nounwind
define void @llvm_mips_mulr_q_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mulr.q.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_mulr_q_w_RES
ret void
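
From arithmetic.ll onward the tests interleave FileCheck directives with the IR. Only the plain IR lines are rewritten; the `; CHECK-DAG:` patterns describe the MSA assembly that llc emits, which is identical under either load spelling. As a reminder of the FileCheck conventions used below: `[[R1:\$w[0-9]+]]` binds R1 to whichever MSA register is chosen (the `\$` escapes the literal `$`), and a later bare `[[R1]]` requires that same register, so a pair such as

  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
  ; CHECK-DAG: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]

continues to match the same output after the IR update.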
diff --git a/test/CodeGen/Mips/msa/arithmetic.ll b/test/CodeGen/Mips/msa/arithmetic.ll
index 09ee502..3ecd0e4 100644
--- a/test/CodeGen/Mips/msa/arithmetic.ll
+++ b/test/CodeGen/Mips/msa/arithmetic.ll
@@ -4,9 +4,9 @@
define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: add_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = add <16 x i8> %1, %2
; CHECK-DAG: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -20,9 +20,9 @@ define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: add_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = add <8 x i16> %1, %2
; CHECK-DAG: addv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -36,9 +36,9 @@ define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: add_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = add <4 x i32> %1, %2
; CHECK-DAG: addv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -52,9 +52,9 @@ define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: add_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = add <2 x i64> %1, %2
; CHECK-DAG: addv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -68,7 +68,7 @@ define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: add_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -83,7 +83,7 @@ define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: add_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
i16 1, i16 1, i16 1, i16 1>
@@ -98,7 +98,7 @@ define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: add_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: addvi.w [[R3:\$w[0-9]+]], [[R1]], 1
@@ -112,7 +112,7 @@ define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: add_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = add <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: addvi.d [[R3:\$w[0-9]+]], [[R1]], 1
@@ -126,9 +126,9 @@ define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: sub_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = sub <16 x i8> %1, %2
; CHECK-DAG: subv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -142,9 +142,9 @@ define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: sub_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = sub <8 x i16> %1, %2
; CHECK-DAG: subv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -158,9 +158,9 @@ define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: sub_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = sub <4 x i32> %1, %2
; CHECK-DAG: subv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -174,9 +174,9 @@ define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: sub_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = sub <2 x i64> %1, %2
; CHECK-DAG: subv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -190,7 +190,7 @@ define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: sub_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -205,7 +205,7 @@ define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: sub_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
i16 1, i16 1, i16 1, i16 1>
@@ -220,7 +220,7 @@ define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: sub_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: subvi.w [[R3:\$w[0-9]+]], [[R1]], 1
@@ -234,7 +234,7 @@ define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: sub_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = sub <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: subvi.d [[R3:\$w[0-9]+]], [[R1]], 1
@@ -248,9 +248,9 @@ define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: mul_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = mul <16 x i8> %1, %2
; CHECK-DAG: mulv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -264,9 +264,9 @@ define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: mul_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = mul <8 x i16> %1, %2
; CHECK-DAG: mulv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -280,9 +280,9 @@ define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: mul_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = mul <4 x i32> %1, %2
; CHECK-DAG: mulv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -296,9 +296,9 @@ define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @mul_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: mul_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = mul <2 x i64> %1, %2
; CHECK-DAG: mulv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -313,11 +313,11 @@ define void @maddv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
<16 x i8>* %c) nounwind {
; CHECK: maddv_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %c
+ %3 = load <16 x i8>, <16 x i8>* %c
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = mul <16 x i8> %2, %3
%5 = add <16 x i8> %4, %1
@@ -333,11 +333,11 @@ define void @maddv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
<8 x i16>* %c) nounwind {
; CHECK: maddv_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>* %c
+ %3 = load <8 x i16>, <8 x i16>* %c
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
%4 = mul <8 x i16> %2, %3
%5 = add <8 x i16> %4, %1
@@ -353,11 +353,11 @@ define void @maddv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
<4 x i32>* %c) nounwind {
; CHECK: maddv_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>* %c
+ %3 = load <4 x i32>, <4 x i32>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = mul <4 x i32> %2, %3
%5 = add <4 x i32> %4, %1
@@ -373,11 +373,11 @@ define void @maddv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
<2 x i64>* %c) nounwind {
; CHECK: maddv_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>* %c
+ %3 = load <2 x i64>, <2 x i64>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = mul <2 x i64> %2, %3
%5 = add <2 x i64> %4, %1
@@ -393,11 +393,11 @@ define void @msubv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
<16 x i8>* %c) nounwind {
; CHECK: msubv_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %c
+ %3 = load <16 x i8>, <16 x i8>* %c
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = mul <16 x i8> %2, %3
%5 = sub <16 x i8> %1, %4
@@ -413,11 +413,11 @@ define void @msubv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
<8 x i16>* %c) nounwind {
; CHECK: msubv_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>* %c
+ %3 = load <8 x i16>, <8 x i16>* %c
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
%4 = mul <8 x i16> %2, %3
%5 = sub <8 x i16> %1, %4
@@ -433,11 +433,11 @@ define void @msubv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
<4 x i32>* %c) nounwind {
; CHECK: msubv_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>* %c
+ %3 = load <4 x i32>, <4 x i32>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = mul <4 x i32> %2, %3
%5 = sub <4 x i32> %1, %4
@@ -453,11 +453,11 @@ define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
<2 x i64>* %c) nounwind {
; CHECK: msubv_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>* %c
+ %3 = load <2 x i64>, <2 x i64>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = mul <2 x i64> %2, %3
%5 = sub <2 x i64> %1, %4
@@ -472,9 +472,9 @@ define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: div_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = sdiv <16 x i8> %1, %2
; CHECK-DAG: div_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -488,9 +488,9 @@ define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: div_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = sdiv <8 x i16> %1, %2
; CHECK-DAG: div_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -504,9 +504,9 @@ define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: div_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = sdiv <4 x i32> %1, %2
; CHECK-DAG: div_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -520,9 +520,9 @@ define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: div_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = sdiv <2 x i64> %1, %2
; CHECK-DAG: div_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -536,9 +536,9 @@ define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: div_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = udiv <16 x i8> %1, %2
; CHECK-DAG: div_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -552,9 +552,9 @@ define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: div_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = udiv <8 x i16> %1, %2
; CHECK-DAG: div_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -568,9 +568,9 @@ define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: div_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = udiv <4 x i32> %1, %2
; CHECK-DAG: div_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -584,9 +584,9 @@ define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: div_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = udiv <2 x i64> %1, %2
; CHECK-DAG: div_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -600,9 +600,9 @@ define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: mod_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = srem <16 x i8> %1, %2
; CHECK-DAG: mod_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -616,9 +616,9 @@ define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: mod_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = srem <8 x i16> %1, %2
; CHECK-DAG: mod_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -632,9 +632,9 @@ define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: mod_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = srem <4 x i32> %1, %2
; CHECK-DAG: mod_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -648,9 +648,9 @@ define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: mod_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = srem <2 x i64> %1, %2
; CHECK-DAG: mod_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -664,9 +664,9 @@ define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: mod_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = urem <16 x i8> %1, %2
; CHECK-DAG: mod_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -680,9 +680,9 @@ define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: mod_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = urem <8 x i16> %1, %2
; CHECK-DAG: mod_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -696,9 +696,9 @@ define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: mod_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = urem <4 x i32> %1, %2
; CHECK-DAG: mod_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -712,9 +712,9 @@ define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @mod_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: mod_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = urem <2 x i64> %1, %2
; CHECK-DAG: mod_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
diff --git a/test/CodeGen/Mips/msa/arithmetic_float.ll b/test/CodeGen/Mips/msa/arithmetic_float.ll
index 9aae284..d2ead53 100644
--- a/test/CodeGen/Mips/msa/arithmetic_float.ll
+++ b/test/CodeGen/Mips/msa/arithmetic_float.ll
@@ -4,9 +4,9 @@
define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: add_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fadd <4 x float> %1, %2
; CHECK-DAG: fadd.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -20,9 +20,9 @@ define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: add_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fadd <2 x double> %1, %2
; CHECK-DAG: fadd.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -36,9 +36,9 @@ define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou
define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: sub_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fsub <4 x float> %1, %2
; CHECK-DAG: fsub.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -52,9 +52,9 @@ define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: sub_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fsub <2 x double> %1, %2
; CHECK-DAG: fsub.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -68,9 +68,9 @@ define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou
define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: mul_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fmul <4 x float> %1, %2
; CHECK-DAG: fmul.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -84,9 +84,9 @@ define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
define void @mul_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: mul_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fmul <2 x double> %1, %2
; CHECK-DAG: fmul.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -101,11 +101,11 @@ define void @fma_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
<4 x float>* %c) nounwind {
; CHECK: fma_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x float>* %c
+ %3 = load <4 x float>, <4 x float>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = tail call <4 x float> @llvm.fma.v4f32 (<4 x float> %1, <4 x float> %2,
<4 x float> %3)
@@ -121,11 +121,11 @@ define void @fma_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
<2 x double>* %c) nounwind {
; CHECK: fma_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x double>* %c
+ %3 = load <2 x double>, <2 x double>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = tail call <2 x double> @llvm.fma.v2f64 (<2 x double> %1, <2 x double> %2,
<2 x double> %3)
@@ -141,11 +141,11 @@ define void @fmsub_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
<4 x float>* %c) nounwind {
; CHECK: fmsub_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x float>* %c
+ %3 = load <4 x float>, <4 x float>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = fmul <4 x float> %2, %3
%5 = fsub <4 x float> %1, %4
@@ -161,11 +161,11 @@ define void @fmsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
<2 x double>* %c) nounwind {
; CHECK: fmsub_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x double>* %c
+ %3 = load <2 x double>, <2 x double>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = fmul <2 x double> %2, %3
%5 = fsub <2 x double> %1, %4
@@ -180,9 +180,9 @@ define void @fmsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: fdiv_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fdiv <4 x float> %1, %2
; CHECK-DAG: fdiv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -196,9 +196,9 @@ define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounw
define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: fdiv_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fdiv <2 x double> %1, %2
; CHECK-DAG: fdiv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -212,7 +212,7 @@ define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) no
define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
; CHECK: fabs_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x float> @llvm.fabs.v4f32 (<4 x float> %1)
; CHECK-DAG: fmax_a.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
@@ -226,7 +226,7 @@ define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
; CHECK: fabs_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.fabs.v2f64 (<2 x double> %1)
; CHECK-DAG: fmax_a.d [[R3:\$w[0-9]+]], [[R1]], [[R1]]
@@ -240,7 +240,7 @@ define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
; CHECK: fexp2_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
@@ -256,7 +256,7 @@ define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
; CHECK: fexp2_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
@@ -272,7 +272,7 @@ define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
; CHECK: fexp2_v4f32_2:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
%3 = fmul <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>, %2
@@ -289,7 +289,7 @@ define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
; CHECK: fexp2_v2f64_2:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
%3 = fmul <2 x double> <double 2.0, double 2.0>, %2
@@ -306,7 +306,7 @@ define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
; CHECK: fsqrt_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %1)
; CHECK-DAG: fsqrt.w [[R3:\$w[0-9]+]], [[R1]]
@@ -320,7 +320,7 @@ define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
; CHECK: fsqrt_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %1)
; CHECK-DAG: fsqrt.d [[R3:\$w[0-9]+]], [[R1]]
@@ -334,7 +334,7 @@ define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
; CHECK: ffint_u_v4f32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = uitofp <4 x i32> %1 to <4 x float>
; CHECK-DAG: ffint_u.w [[R3:\$w[0-9]+]], [[R1]]
@@ -348,7 +348,7 @@ define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
; CHECK: ffint_u_v2f64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = uitofp <2 x i64> %1 to <2 x double>
; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R1]]
@@ -362,7 +362,7 @@ define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
; CHECK: ffint_s_v4f32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = sitofp <4 x i32> %1 to <4 x float>
; CHECK-DAG: ffint_s.w [[R3:\$w[0-9]+]], [[R1]]
@@ -376,7 +376,7 @@ define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
; CHECK: ffint_s_v2f64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = sitofp <2 x i64> %1 to <2 x double>
; CHECK-DAG: ffint_s.d [[R3:\$w[0-9]+]], [[R1]]
@@ -390,7 +390,7 @@ define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
; CHECK: ftrunc_u_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = fptoui <4 x float> %1 to <4 x i32>
; CHECK-DAG: ftrunc_u.w [[R3:\$w[0-9]+]], [[R1]]
@@ -404,7 +404,7 @@ define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
; CHECK: ftrunc_u_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = fptoui <2 x double> %1 to <2 x i64>
; CHECK-DAG: ftrunc_u.d [[R3:\$w[0-9]+]], [[R1]]
@@ -418,7 +418,7 @@ define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
; CHECK: ftrunc_s_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = fptosi <4 x float> %1 to <4 x i32>
; CHECK-DAG: ftrunc_s.w [[R3:\$w[0-9]+]], [[R1]]
@@ -432,7 +432,7 @@ define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
define void @ftrunc_s_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
; CHECK: ftrunc_s_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = fptosi <2 x double> %1 to <2 x i64>
; CHECK-DAG: ftrunc_s.d [[R3:\$w[0-9]+]], [[R1]]
diff --git a/test/CodeGen/Mips/msa/basic_operations.ll b/test/CodeGen/Mips/msa/basic_operations.ll
index dbdf42b..97525be 100644
--- a/test/CodeGen/Mips/msa/basic_operations.ll
+++ b/test/CodeGen/Mips/msa/basic_operations.ll
@@ -258,7 +258,7 @@ define void @nonconst_v2i64(i64 %a, i64 %b) nounwind {
define i32 @extract_sext_v16i8() nounwind {
; MIPS32-AE-LABEL: extract_sext_v16i8:
- %1 = load <16 x i8>* @v16i8
+ %1 = load <16 x i8>, <16 x i8>* @v16i8
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
%2 = add <16 x i8> %1, %1
@@ -277,7 +277,7 @@ define i32 @extract_sext_v16i8() nounwind {
define i32 @extract_sext_v8i16() nounwind {
; MIPS32-AE-LABEL: extract_sext_v8i16:
- %1 = load <8 x i16>* @v8i16
+ %1 = load <8 x i16>, <8 x i16>* @v8i16
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
%2 = add <8 x i16> %1, %1
@@ -296,7 +296,7 @@ define i32 @extract_sext_v8i16() nounwind {
define i32 @extract_sext_v4i32() nounwind {
; MIPS32-AE-LABEL: extract_sext_v4i32:
- %1 = load <4 x i32>* @v4i32
+ %1 = load <4 x i32>, <4 x i32>* @v4i32
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = add <4 x i32> %1, %1
@@ -312,7 +312,7 @@ define i32 @extract_sext_v4i32() nounwind {
define i64 @extract_sext_v2i64() nounwind {
; MIPS32-AE-LABEL: extract_sext_v2i64:
- %1 = load <2 x i64>* @v2i64
+ %1 = load <2 x i64>, <2 x i64>* @v2i64
; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
%2 = add <2 x i64> %1, %1
@@ -331,7 +331,7 @@ define i64 @extract_sext_v2i64() nounwind {
define i32 @extract_zext_v16i8() nounwind {
; MIPS32-AE-LABEL: extract_zext_v16i8:
- %1 = load <16 x i8>* @v16i8
+ %1 = load <16 x i8>, <16 x i8>* @v16i8
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
%2 = add <16 x i8> %1, %1
@@ -349,7 +349,7 @@ define i32 @extract_zext_v16i8() nounwind {
define i32 @extract_zext_v8i16() nounwind {
; MIPS32-AE-LABEL: extract_zext_v8i16:
- %1 = load <8 x i16>* @v8i16
+ %1 = load <8 x i16>, <8 x i16>* @v8i16
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
%2 = add <8 x i16> %1, %1
@@ -367,7 +367,7 @@ define i32 @extract_zext_v8i16() nounwind {
define i32 @extract_zext_v4i32() nounwind {
; MIPS32-AE-LABEL: extract_zext_v4i32:
- %1 = load <4 x i32>* @v4i32
+ %1 = load <4 x i32>, <4 x i32>* @v4i32
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = add <4 x i32> %1, %1
@@ -383,7 +383,7 @@ define i32 @extract_zext_v4i32() nounwind {
define i64 @extract_zext_v2i64() nounwind {
; MIPS32-AE-LABEL: extract_zext_v2i64:
- %1 = load <2 x i64>* @v2i64
+ %1 = load <2 x i64>, <2 x i64>* @v2i64
; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
%2 = add <2 x i64> %1, %1
@@ -401,14 +401,14 @@ define i64 @extract_zext_v2i64() nounwind {
define i32 @extract_sext_v16i8_vidx() nounwind {
; MIPS32-AE-LABEL: extract_sext_v16i8_vidx:
- %1 = load <16 x i8>* @v16i8
+ %1 = load <16 x i8>, <16 x i8>* @v16i8
; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v16i8)(
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = add <16 x i8> %1, %1
; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -425,14 +425,14 @@ define i32 @extract_sext_v16i8_vidx() nounwind {
define i32 @extract_sext_v8i16_vidx() nounwind {
; MIPS32-AE-LABEL: extract_sext_v8i16_vidx:
- %1 = load <8 x i16>* @v8i16
+ %1 = load <8 x i16>, <8 x i16>* @v8i16
; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v8i16)(
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = add <8 x i16> %1, %1
; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -449,14 +449,14 @@ define i32 @extract_sext_v8i16_vidx() nounwind {
define i32 @extract_sext_v4i32_vidx() nounwind {
; MIPS32-AE-LABEL: extract_sext_v4i32_vidx:
- %1 = load <4 x i32>* @v4i32
+ %1 = load <4 x i32>, <4 x i32>* @v4i32
; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4i32)(
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = add <4 x i32> %1, %1
; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -472,14 +472,14 @@ define i32 @extract_sext_v4i32_vidx() nounwind {
define i64 @extract_sext_v2i64_vidx() nounwind {
; MIPS32-AE-LABEL: extract_sext_v2i64_vidx:
- %1 = load <2 x i64>* @v2i64
+ %1 = load <2 x i64>, <2 x i64>* @v2i64
; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2i64)(
; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = add <2 x i64> %1, %1
; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -497,14 +497,14 @@ define i64 @extract_sext_v2i64_vidx() nounwind {
define i32 @extract_zext_v16i8_vidx() nounwind {
; MIPS32-AE-LABEL: extract_zext_v16i8_vidx:
- %1 = load <16 x i8>* @v16i8
+ %1 = load <16 x i8>, <16 x i8>* @v16i8
; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v16i8)(
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = add <16 x i8> %1, %1
; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -521,14 +521,14 @@ define i32 @extract_zext_v16i8_vidx() nounwind {
define i32 @extract_zext_v8i16_vidx() nounwind {
; MIPS32-AE-LABEL: extract_zext_v8i16_vidx:
- %1 = load <8 x i16>* @v8i16
+ %1 = load <8 x i16>, <8 x i16>* @v8i16
; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v8i16)(
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = add <8 x i16> %1, %1
; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -545,14 +545,14 @@ define i32 @extract_zext_v8i16_vidx() nounwind {
define i32 @extract_zext_v4i32_vidx() nounwind {
; MIPS32-AE-LABEL: extract_zext_v4i32_vidx:
- %1 = load <4 x i32>* @v4i32
+ %1 = load <4 x i32>, <4 x i32>* @v4i32
; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4i32)(
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = add <4 x i32> %1, %1
; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -568,14 +568,14 @@ define i32 @extract_zext_v4i32_vidx() nounwind {
define i64 @extract_zext_v2i64_vidx() nounwind {
; MIPS32-AE-LABEL: extract_zext_v2i64_vidx:
- %1 = load <2 x i64>* @v2i64
+ %1 = load <2 x i64>, <2 x i64>* @v2i64
; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2i64)(
; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = add <2 x i64> %1, %1
; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -593,7 +593,7 @@ define i64 @extract_zext_v2i64_vidx() nounwind {
define void @insert_v16i8(i32 %a) nounwind {
; MIPS32-AE-LABEL: insert_v16i8:
- %1 = load <16 x i8>* @v16i8
+ %1 = load <16 x i8>, <16 x i8>* @v16i8
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
%a2 = trunc i32 %a to i8
@@ -615,7 +615,7 @@ define void @insert_v16i8(i32 %a) nounwind {
define void @insert_v8i16(i32 %a) nounwind {
; MIPS32-AE-LABEL: insert_v8i16:
- %1 = load <8 x i16>* @v8i16
+ %1 = load <8 x i16>, <8 x i16>* @v8i16
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
%a2 = trunc i32 %a to i16
@@ -637,7 +637,7 @@ define void @insert_v8i16(i32 %a) nounwind {
define void @insert_v4i32(i32 %a) nounwind {
; MIPS32-AE-LABEL: insert_v4i32:
- %1 = load <4 x i32>* @v4i32
+ %1 = load <4 x i32>, <4 x i32>* @v4i32
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
; MIPS32-AE-NOT: andi
@@ -656,7 +656,7 @@ define void @insert_v4i32(i32 %a) nounwind {
define void @insert_v2i64(i64 %a) nounwind {
; MIPS32-AE-LABEL: insert_v2i64:
- %1 = load <2 x i64>* @v2i64
+ %1 = load <2 x i64>, <2 x i64>* @v2i64
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
; MIPS32-AE-NOT: andi
@@ -676,10 +676,10 @@ define void @insert_v2i64(i64 %a) nounwind {
define void @insert_v16i8_vidx(i32 %a) nounwind {
; MIPS32-AE: insert_v16i8_vidx:
- %1 = load <16 x i8>* @v16i8
+ %1 = load <16 x i8>, <16 x i8>* @v16i8
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
- %2 = load i32* @i32
+ %2 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -705,10 +705,10 @@ define void @insert_v16i8_vidx(i32 %a) nounwind {
define void @insert_v8i16_vidx(i32 %a) nounwind {
; MIPS32-AE: insert_v8i16_vidx:
- %1 = load <8 x i16>* @v8i16
+ %1 = load <8 x i16>, <8 x i16>* @v8i16
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
- %2 = load i32* @i32
+ %2 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -735,10 +735,10 @@ define void @insert_v8i16_vidx(i32 %a) nounwind {
define void @insert_v4i32_vidx(i32 %a) nounwind {
; MIPS32-AE: insert_v4i32_vidx:
- %1 = load <4 x i32>* @v4i32
+ %1 = load <4 x i32>, <4 x i32>* @v4i32
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
- %2 = load i32* @i32
+ %2 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -762,10 +762,10 @@ define void @insert_v4i32_vidx(i32 %a) nounwind {
define void @insert_v2i64_vidx(i64 %a) nounwind {
; MIPS32-AE: insert_v2i64_vidx:
- %1 = load <2 x i64>* @v2i64
+ %1 = load <2 x i64>, <2 x i64>* @v2i64
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
- %2 = load i32* @i32
+ %2 = load i32, i32* @i32
; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
diff --git a/test/CodeGen/Mips/msa/basic_operations_float.ll b/test/CodeGen/Mips/msa/basic_operations_float.ll
index a0c9d29..53c1f11 100644
--- a/test/CodeGen/Mips/msa/basic_operations_float.ll
+++ b/test/CodeGen/Mips/msa/basic_operations_float.ll
@@ -75,7 +75,7 @@ define void @const_v2f64() nounwind {
define void @nonconst_v4f32() nounwind {
; MIPS32-LABEL: nonconst_v4f32:
- %1 = load float *@f32
+ %1 = load float , float *@f32
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = insertelement <4 x float> %2, float %1, i32 1
%4 = insertelement <4 x float> %3, float %1, i32 2
@@ -91,7 +91,7 @@ define void @nonconst_v4f32() nounwind {
define void @nonconst_v2f64() nounwind {
; MIPS32-LABEL: nonconst_v2f64:
- %1 = load double *@f64
+ %1 = load double , double *@f64
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
store volatile <2 x double> %3, <2 x double>*@v2f64
@@ -105,7 +105,7 @@ define void @nonconst_v2f64() nounwind {
define float @extract_v4f32() nounwind {
; MIPS32-LABEL: extract_v4f32:
- %1 = load <4 x float>* @v4f32
+ %1 = load <4 x float>, <4 x float>* @v4f32
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = fadd <4 x float> %1, %1
@@ -123,7 +123,7 @@ define float @extract_v4f32() nounwind {
define float @extract_v4f32_elt0() nounwind {
; MIPS32-LABEL: extract_v4f32_elt0:
- %1 = load <4 x float>* @v4f32
+ %1 = load <4 x float>, <4 x float>* @v4f32
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = fadd <4 x float> %1, %1
@@ -141,7 +141,7 @@ define float @extract_v4f32_elt0() nounwind {
define float @extract_v4f32_elt2() nounwind {
; MIPS32-LABEL: extract_v4f32_elt2:
- %1 = load <4 x float>* @v4f32
+ %1 = load <4 x float>, <4 x float>* @v4f32
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = fadd <4 x float> %1, %1
@@ -159,14 +159,14 @@ define float @extract_v4f32_elt2() nounwind {
define float @extract_v4f32_vidx() nounwind {
; MIPS32-LABEL: extract_v4f32_vidx:
- %1 = load <4 x float>* @v4f32
+ %1 = load <4 x float>, <4 x float>* @v4f32
; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = fadd <4 x float> %1, %1
; MIPS32-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -180,7 +180,7 @@ define float @extract_v4f32_vidx() nounwind {
define double @extract_v2f64() nounwind {
; MIPS32-LABEL: extract_v2f64:
- %1 = load <2 x double>* @v2f64
+ %1 = load <2 x double>, <2 x double>* @v2f64
; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
%2 = fadd <2 x double> %1, %1
@@ -203,7 +203,7 @@ define double @extract_v2f64() nounwind {
define double @extract_v2f64_elt0() nounwind {
; MIPS32-LABEL: extract_v2f64_elt0:
- %1 = load <2 x double>* @v2f64
+ %1 = load <2 x double>, <2 x double>* @v2f64
; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
%2 = fadd <2 x double> %1, %1
@@ -224,14 +224,14 @@ define double @extract_v2f64_elt0() nounwind {
define double @extract_v2f64_vidx() nounwind {
; MIPS32-LABEL: extract_v2f64_vidx:
- %1 = load <2 x double>* @v2f64
+ %1 = load <2 x double>, <2 x double>* @v2f64
; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
%2 = fadd <2 x double> %1, %1
; MIPS32-DAG: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32* @i32
+ %3 = load i32, i32* @i32
; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -245,7 +245,7 @@ define double @extract_v2f64_vidx() nounwind {
define void @insert_v4f32(float %a) nounwind {
; MIPS32-LABEL: insert_v4f32:
- %1 = load <4 x float>* @v4f32
+ %1 = load <4 x float>, <4 x float>* @v4f32
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = insertelement <4 x float> %1, float %a, i32 1
@@ -262,7 +262,7 @@ define void @insert_v4f32(float %a) nounwind {
define void @insert_v2f64(double %a) nounwind {
; MIPS32-LABEL: insert_v2f64:
- %1 = load <2 x double>* @v2f64
+ %1 = load <2 x double>, <2 x double>* @v2f64
; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
%2 = insertelement <2 x double> %1, double %a, i32 1
@@ -279,11 +279,11 @@ define void @insert_v2f64(double %a) nounwind {
define void @insert_v4f32_vidx(float %a) nounwind {
; MIPS32-LABEL: insert_v4f32_vidx:
- %1 = load <4 x float>* @v4f32
+ %1 = load <4 x float>, <4 x float>* @v4f32
; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
- %2 = load i32* @i32
+ %2 = load i32, i32* @i32
; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -305,11 +305,11 @@ define void @insert_v4f32_vidx(float %a) nounwind {
define void @insert_v2f64_vidx(double %a) nounwind {
; MIPS32-LABEL: insert_v2f64_vidx:
- %1 = load <2 x double>* @v2f64
+ %1 = load <2 x double>, <2 x double>* @v2f64
; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
- %2 = load i32* @i32
+ %2 = load i32, i32* @i32
; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
diff --git a/test/CodeGen/Mips/msa/bit.ll b/test/CodeGen/Mips/msa/bit.ll
index 59ddbe1..f005730 100644
--- a/test/CodeGen/Mips/msa/bit.ll
+++ b/test/CodeGen/Mips/msa/bit.ll
@@ -8,7 +8,7 @@
define void @llvm_mips_sat_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sat_s_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES
ret void
@@ -27,7 +27,7 @@ declare <16 x i8> @llvm.mips.sat.s.b(<16 x i8>, i32) nounwind
define void @llvm_mips_sat_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sat_s_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES
ret void
@@ -46,7 +46,7 @@ declare <8 x i16> @llvm.mips.sat.s.h(<8 x i16>, i32) nounwind
define void @llvm_mips_sat_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sat_s_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES
ret void
@@ -65,7 +65,7 @@ declare <4 x i32> @llvm.mips.sat.s.w(<4 x i32>, i32) nounwind
define void @llvm_mips_sat_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sat_s_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
ret void
@@ -84,7 +84,7 @@ declare <2 x i64> @llvm.mips.sat.s.d(<2 x i64>, i32) nounwind
define void @llvm_mips_sat_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sat_u_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
ret void
@@ -103,7 +103,7 @@ declare <16 x i8> @llvm.mips.sat.u.b(<16 x i8>, i32) nounwind
define void @llvm_mips_sat_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sat_u_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
ret void
@@ -122,7 +122,7 @@ declare <8 x i16> @llvm.mips.sat.u.h(<8 x i16>, i32) nounwind
define void @llvm_mips_sat_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sat_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
ret void
@@ -141,7 +141,7 @@ declare <4 x i32> @llvm.mips.sat.u.w(<4 x i32>, i32) nounwind
define void @llvm_mips_sat_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sat_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
ret void
@@ -160,7 +160,7 @@ declare <2 x i64> @llvm.mips.sat.u.d(<2 x i64>, i32) nounwind
define void @llvm_mips_slli_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_slli_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_slli_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
ret void
@@ -179,7 +179,7 @@ declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32) nounwind
define void @llvm_mips_slli_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_slli_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_slli_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
ret void
@@ -198,7 +198,7 @@ declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32) nounwind
define void @llvm_mips_slli_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_slli_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_slli_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
ret void
@@ -217,7 +217,7 @@ declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32) nounwind
define void @llvm_mips_slli_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_slli_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_slli_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
ret void
@@ -236,7 +236,7 @@ declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32) nounwind
define void @llvm_mips_srai_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srai_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srai_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
ret void
@@ -255,7 +255,7 @@ declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32) nounwind
define void @llvm_mips_srai_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srai_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srai_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
ret void
@@ -274,7 +274,7 @@ declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32) nounwind
define void @llvm_mips_srai_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srai_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srai_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
ret void
@@ -293,7 +293,7 @@ declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32) nounwind
define void @llvm_mips_srai_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srai_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srai_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
ret void
@@ -312,7 +312,7 @@ declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32) nounwind
define void @llvm_mips_srari_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srari_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srari_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srari.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_srari_b_RES
ret void
@@ -331,7 +331,7 @@ declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32) nounwind
define void @llvm_mips_srari_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srari_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srari_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srari.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_srari_h_RES
ret void
@@ -350,7 +350,7 @@ declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32) nounwind
define void @llvm_mips_srari_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srari_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srari_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srari.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_srari_w_RES
ret void
@@ -369,7 +369,7 @@ declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32) nounwind
define void @llvm_mips_srari_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srari_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srari_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srari.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_srari_d_RES
ret void
@@ -388,7 +388,7 @@ declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32) nounwind
define void @llvm_mips_srli_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srli_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srli_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
ret void
@@ -407,7 +407,7 @@ declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32) nounwind
define void @llvm_mips_srli_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srli_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srli_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
ret void
@@ -426,7 +426,7 @@ declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32) nounwind
define void @llvm_mips_srli_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srli_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srli_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
ret void
@@ -445,7 +445,7 @@ declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32) nounwind
define void @llvm_mips_srli_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srli_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srli_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
ret void
@@ -464,7 +464,7 @@ declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32) nounwind
define void @llvm_mips_srlri_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_srlri_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlri_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_srlri_b_RES
ret void
@@ -483,7 +483,7 @@ declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32) nounwind
define void @llvm_mips_srlri_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_srlri_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlri_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_srlri_h_RES
ret void
@@ -502,7 +502,7 @@ declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32) nounwind
define void @llvm_mips_srlri_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_srlri_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlri_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_srlri_w_RES
ret void
@@ -521,7 +521,7 @@ declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32) nounwind
define void @llvm_mips_srlri_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_srlri_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlri_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_srlri_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/bitcast.ll b/test/CodeGen/Mips/msa/bitcast.ll
index 8e880ec..837cc28 100644
--- a/test/CodeGen/Mips/msa/bitcast.ll
+++ b/test/CodeGen/Mips/msa/bitcast.ll
@@ -5,7 +5,7 @@
define void @v16i8_to_v16i8(<16 x i8>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -29,7 +29,7 @@ entry:
define void @v16i8_to_v8i16(<16 x i8>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -56,7 +56,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v16i8_to_v8f16(<16 x i8>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -77,7 +77,7 @@ entry:
define void @v16i8_to_v4i32(<16 x i8>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -102,7 +102,7 @@ entry:
define void @v16i8_to_v4f32(<16 x i8>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -127,7 +127,7 @@ entry:
define void @v16i8_to_v2i64(<16 x i8>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -153,7 +153,7 @@ entry:
define void @v16i8_to_v2f64(<16 x i8>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -179,7 +179,7 @@ entry:
define void @v8i16_to_v16i8(<8 x i16>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -204,7 +204,7 @@ entry:
define void @v8i16_to_v8i16(<8 x i16>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -230,7 +230,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8i16_to_v8f16(<8 x i16>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -251,7 +251,7 @@ entry:
define void @v8i16_to_v4i32(<8 x i16>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -276,7 +276,7 @@ entry:
define void @v8i16_to_v4f32(<8 x i16>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -301,7 +301,7 @@ entry:
define void @v8i16_to_v2i64(<8 x i16>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -326,7 +326,7 @@ entry:
define void @v8i16_to_v2f64(<8 x i16>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -354,7 +354,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v16i8(<8 x half>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <16 x i8>
%2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %1, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* %dst
@@ -378,7 +378,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v8i16(<8 x half>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <8 x i16>
%2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %1, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* %dst
@@ -403,7 +403,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v8f16(<8 x half>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <8 x half>
store <8 x half> %1, <8 x half>* %dst
ret void
@@ -423,7 +423,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v4i32(<8 x half>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <4 x i32>
%2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %1, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* %dst
@@ -447,7 +447,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v4f32(<8 x half>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <4 x float>
%2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %1, <4 x float> %1)
store <4 x float> %2, <4 x float>* %dst
@@ -471,7 +471,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v2i64(<8 x half>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <2 x i64>
%2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %1, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* %dst
@@ -495,7 +495,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v8f16_to_v2f64(<8 x half>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
%1 = bitcast <8 x half> %0 to <2 x double>
%2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %1, <2 x double> %1)
store <2 x double> %2, <2 x double>* %dst
@@ -518,7 +518,7 @@ entry:
define void @v4i32_to_v16i8(<4 x i32>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -543,7 +543,7 @@ entry:
define void @v4i32_to_v8i16(<4 x i32>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -570,7 +570,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v4i32_to_v8f16(<4 x i32>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -591,7 +591,7 @@ entry:
define void @v4i32_to_v4i32(<4 x i32>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -615,7 +615,7 @@ entry:
define void @v4i32_to_v4f32(<4 x i32>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -639,7 +639,7 @@ entry:
define void @v4i32_to_v2i64(<4 x i32>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -664,7 +664,7 @@ entry:
define void @v4i32_to_v2f64(<4 x i32>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -689,7 +689,7 @@ entry:
define void @v4f32_to_v16i8(<4 x float>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -714,7 +714,7 @@ entry:
define void @v4f32_to_v8i16(<4 x float>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -741,7 +741,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v4f32_to_v8f16(<4 x float>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -762,7 +762,7 @@ entry:
define void @v4f32_to_v4i32(<4 x float>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -786,7 +786,7 @@ entry:
define void @v4f32_to_v4f32(<4 x float>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -810,7 +810,7 @@ entry:
define void @v4f32_to_v2i64(<4 x float>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -835,7 +835,7 @@ entry:
define void @v4f32_to_v2f64(<4 x float>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -860,7 +860,7 @@ entry:
define void @v2i64_to_v16i8(<2 x i64>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -886,7 +886,7 @@ entry:
define void @v2i64_to_v8i16(<2 x i64>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -913,7 +913,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v2i64_to_v8f16(<2 x i64>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -934,7 +934,7 @@ entry:
define void @v2i64_to_v4i32(<2 x i64>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -959,7 +959,7 @@ entry:
define void @v2i64_to_v4f32(<2 x i64>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -984,7 +984,7 @@ entry:
define void @v2i64_to_v2i64(<2 x i64>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -1008,7 +1008,7 @@ entry:
define void @v2i64_to_v2f64(<2 x i64>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -1032,7 +1032,7 @@ entry:
define void @v2f64_to_v16i8(<2 x double>* %src, <16 x i8>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -1058,7 +1058,7 @@ entry:
define void @v2f64_to_v8i16(<2 x double>* %src, <8 x i16>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -1085,7 +1085,7 @@ entry:
; are no operations for v8f16 to put in the way.
define void @v2f64_to_v8f16(<2 x double>* %src, <8 x half>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <8 x half>
store <8 x half> %2, <8 x half>* %dst
@@ -1106,7 +1106,7 @@ entry:
define void @v2f64_to_v4i32(<2 x double>* %src, <4 x i32>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -1131,7 +1131,7 @@ entry:
define void @v2f64_to_v4f32(<2 x double>* %src, <4 x float>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -1156,7 +1156,7 @@ entry:
define void @v2f64_to_v2i64(<2 x double>* %src, <2 x i64>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -1180,7 +1180,7 @@ entry:
define void @v2f64_to_v2f64(<2 x double>* %src, <2 x double>* %dst) nounwind {
entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
diff --git a/test/CodeGen/Mips/msa/bitwise.ll b/test/CodeGen/Mips/msa/bitwise.ll
index 5d57198..2a260b2 100644
--- a/test/CodeGen/Mips/msa/bitwise.ll
+++ b/test/CodeGen/Mips/msa/bitwise.ll
@@ -4,9 +4,9 @@
define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: and_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = and <16 x i8> %1, %2
; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -20,9 +20,9 @@ define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: and_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = and <8 x i16> %1, %2
; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -36,9 +36,9 @@ define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: and_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = and <4 x i32> %1, %2
; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -52,9 +52,9 @@ define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: and_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = and <2 x i64> %1, %2
; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -68,7 +68,7 @@ define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: and_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: andi.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -82,7 +82,7 @@ define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: and_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
@@ -97,7 +97,7 @@ define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: and_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = and <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
@@ -112,7 +112,7 @@ define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: and_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = and <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
@@ -127,9 +127,9 @@ define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: or_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = or <16 x i8> %1, %2
; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -143,9 +143,9 @@ define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: or_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = or <8 x i16> %1, %2
; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -159,9 +159,9 @@ define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: or_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = or <4 x i32> %1, %2
; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -175,9 +175,9 @@ define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: or_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = or <2 x i64> %1, %2
; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -191,7 +191,7 @@ define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: or_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = or <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
; CHECK-DAG: ori.b [[R4:\$w[0-9]+]], [[R1]], 3
@@ -205,7 +205,7 @@ define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: or_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = or <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3
@@ -220,7 +220,7 @@ define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: or_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = or <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3
@@ -235,7 +235,7 @@ define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: or_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = or <2 x i64> %1, <i64 3, i64 3>
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3
@@ -250,9 +250,9 @@ define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: nor_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = or <16 x i8> %1, %2
%4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -267,9 +267,9 @@ define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: nor_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = or <8 x i16> %1, %2
%4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -284,9 +284,9 @@ define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: nor_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = or <4 x i32> %1, %2
%4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -301,9 +301,9 @@ define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: nor_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = or <2 x i64> %1, %2
%4 = xor <2 x i64> %3, <i64 -1, i64 -1>
@@ -318,7 +318,7 @@ define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: nor_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = or <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -333,7 +333,7 @@ define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: nor_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = or <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = xor <8 x i16> %2, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -349,7 +349,7 @@ define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: nor_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = xor <4 x i32> %2, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -365,7 +365,7 @@ define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: nor_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = or <2 x i64> %1, <i64 1, i64 1>
%3 = xor <2 x i64> %2, <i64 -1, i64 -1>
@@ -381,9 +381,9 @@ define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: xor_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = xor <16 x i8> %1, %2
; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -397,9 +397,9 @@ define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: xor_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = xor <8 x i16> %1, %2
; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -413,9 +413,9 @@ define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: xor_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = xor <4 x i32> %1, %2
; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -429,9 +429,9 @@ define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: xor_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = xor <2 x i64> %1, %2
; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -445,7 +445,7 @@ define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: xor_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = xor <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
; CHECK-DAG: xori.b [[R4:\$w[0-9]+]], [[R1]], 3
@@ -459,7 +459,7 @@ define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: xor_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = xor <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3
@@ -474,7 +474,7 @@ define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: xor_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = xor <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3
@@ -489,7 +489,7 @@ define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: xor_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = xor <2 x i64> %1, <i64 3, i64 3>
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3
@@ -504,9 +504,9 @@ define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: sll_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shl <16 x i8> %1, %2
; CHECK-DAG: sll.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -520,9 +520,9 @@ define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: sll_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shl <8 x i16> %1, %2
; CHECK-DAG: sll.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -536,9 +536,9 @@ define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: sll_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shl <4 x i32> %1, %2
; CHECK-DAG: sll.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -552,9 +552,9 @@ define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: sll_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shl <2 x i64> %1, %2
; CHECK-DAG: sll.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -568,7 +568,7 @@ define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: sll_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shl <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: slli.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -582,7 +582,7 @@ define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: sll_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shl <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: slli.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -596,7 +596,7 @@ define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: sll_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shl <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: slli.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -610,7 +610,7 @@ define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: sll_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shl <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: slli.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -624,9 +624,9 @@ define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: sra_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = ashr <16 x i8> %1, %2
; CHECK-DAG: sra.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -640,9 +640,9 @@ define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: sra_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = ashr <8 x i16> %1, %2
; CHECK-DAG: sra.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -656,9 +656,9 @@ define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: sra_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = ashr <4 x i32> %1, %2
; CHECK-DAG: sra.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -672,9 +672,9 @@ define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: sra_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = ashr <2 x i64> %1, %2
; CHECK-DAG: sra.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -688,7 +688,7 @@ define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: sra_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = ashr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: srai.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -702,7 +702,7 @@ define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: sra_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = ashr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: srai.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -716,7 +716,7 @@ define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: sra_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = ashr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: srai.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -730,7 +730,7 @@ define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: sra_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = ashr <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -744,9 +744,9 @@ define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: srl_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = lshr <16 x i8> %1, %2
; CHECK-DAG: srl.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -760,9 +760,9 @@ define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: srl_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = lshr <8 x i16> %1, %2
; CHECK-DAG: srl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -776,9 +776,9 @@ define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: srl_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = lshr <4 x i32> %1, %2
; CHECK-DAG: srl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -792,9 +792,9 @@ define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: srl_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = lshr <2 x i64> %1, %2
; CHECK-DAG: srl.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -808,7 +808,7 @@ define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: srl_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = lshr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: srli.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -822,7 +822,7 @@ define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: srl_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = lshr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: srli.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -836,7 +836,7 @@ define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: srl_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = lshr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: srli.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -850,7 +850,7 @@ define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: srl_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = lshr <2 x i64> %1, <i64 1, i64 1>
; CHECK-DAG: srli.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -864,7 +864,7 @@ define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: ctpop_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <16 x i8> @llvm.ctpop.v16i8 (<16 x i8> %1)
; CHECK-DAG: pcnt.b [[R3:\$w[0-9]+]], [[R1]]
@@ -878,7 +878,7 @@ define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: ctpop_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <8 x i16> @llvm.ctpop.v8i16 (<8 x i16> %1)
; CHECK-DAG: pcnt.h [[R3:\$w[0-9]+]], [[R1]]
@@ -892,7 +892,7 @@ define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: ctpop_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x i32> @llvm.ctpop.v4i32 (<4 x i32> %1)
; CHECK-DAG: pcnt.w [[R3:\$w[0-9]+]], [[R1]]
@@ -906,7 +906,7 @@ define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: ctpop_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x i64> @llvm.ctpop.v2i64 (<2 x i64> %1)
; CHECK-DAG: pcnt.d [[R3:\$w[0-9]+]], [[R1]]
@@ -920,7 +920,7 @@ define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: ctlz_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <16 x i8> @llvm.ctlz.v16i8 (<16 x i8> %1)
; CHECK-DAG: nlzc.b [[R3:\$w[0-9]+]], [[R1]]
@@ -934,7 +934,7 @@ define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: ctlz_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <8 x i16> @llvm.ctlz.v8i16 (<8 x i16> %1)
; CHECK-DAG: nlzc.h [[R3:\$w[0-9]+]], [[R1]]
@@ -948,7 +948,7 @@ define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: ctlz_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x i32> @llvm.ctlz.v4i32 (<4 x i32> %1)
; CHECK-DAG: nlzc.w [[R3:\$w[0-9]+]], [[R1]]
@@ -962,7 +962,7 @@ define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: ctlz_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x i64> @llvm.ctlz.v2i64 (<2 x i64> %1)
; CHECK-DAG: nlzc.d [[R3:\$w[0-9]+]], [[R1]]
@@ -976,11 +976,11 @@ define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %m) nounwind {
; CHECK: bsel_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %m
+ %3 = load <16 x i8>, <16 x i8>* %m
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1,
i8 -1, i8 -1, i8 -1, i8 -1,
@@ -1002,9 +1002,9 @@ define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>*
define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind {
; CHECK: bsel_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %m
+ %2 = load <16 x i8>, <16 x i8>* %m
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($6)
%3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1,
i8 -1, i8 -1, i8 -1, i8 -1,
@@ -1027,9 +1027,9 @@ define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind
define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: bsel_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = and <8 x i16> %1, <i16 6, i16 6, i16 6, i16 6,
i16 6, i16 6, i16 6, i16 6>
@@ -1048,9 +1048,9 @@ define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: bsel_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = and <4 x i32> %1, <i32 6, i32 6, i32 6, i32 6>
%4 = and <4 x i32> %2, <i32 4294967289, i32 4294967289, i32 4294967289, i32 4294967289>
@@ -1067,9 +1067,9 @@ define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: bsel_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = and <2 x i64> %1, <i64 6, i64 6>
%4 = and <2 x i64> %2, <i64 18446744073709551609, i64 18446744073709551609>
@@ -1086,9 +1086,9 @@ define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: binsl_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = and <16 x i8> %1, <i8 192, i8 192, i8 192, i8 192,
i8 192, i8 192, i8 192, i8 192,
@@ -1110,9 +1110,9 @@ define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: binsl_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = and <8 x i16> %1, <i16 49152, i16 49152, i16 49152, i16 49152,
i16 49152, i16 49152, i16 49152, i16 49152>
@@ -1130,9 +1130,9 @@ define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: binsl_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = and <4 x i32> %1, <i32 3221225472, i32 3221225472, i32 3221225472, i32 3221225472>
%4 = and <4 x i32> %2, <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
@@ -1148,9 +1148,9 @@ define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: binsl_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = and <2 x i64> %1, <i64 18446744073709551608, i64 18446744073709551608>
%4 = and <2 x i64> %2, <i64 7, i64 7>
@@ -1170,9 +1170,9 @@ define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: binsr_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = and <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3,
i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
@@ -1192,9 +1192,9 @@ define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: binsr_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = and <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3,
i16 3, i16 3, i16 3, i16 3>
@@ -1212,9 +1212,9 @@ define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: binsr_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
%4 = and <4 x i32> %2, <i32 4294967292, i32 4294967292, i32 4294967292, i32 4294967292>
@@ -1230,9 +1230,9 @@ define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: binsr_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = and <2 x i64> %1, <i64 3, i64 3>
%4 = and <2 x i64> %2, <i64 18446744073709551612, i64 18446744073709551612>
@@ -1248,9 +1248,9 @@ define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: bclr_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -1266,9 +1266,9 @@ define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: bclr_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -1284,9 +1284,9 @@ define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: bclr_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -1302,9 +1302,9 @@ define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: bclr_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = xor <2 x i64> %3, <i64 -1, i64 -1>
@@ -1320,9 +1320,9 @@ define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: bset_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = or <16 x i8> %1, %3
@@ -1337,9 +1337,9 @@ define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: bset_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = or <8 x i16> %1, %3
@@ -1354,9 +1354,9 @@ define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: bset_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = or <4 x i32> %1, %3
@@ -1371,9 +1371,9 @@ define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: bset_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = or <2 x i64> %1, %3
@@ -1388,9 +1388,9 @@ define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: bneg_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = xor <16 x i8> %1, %3
@@ -1405,9 +1405,9 @@ define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: bneg_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = xor <8 x i16> %1, %3
@@ -1422,9 +1422,9 @@ define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: bneg_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = xor <4 x i32> %1, %3
@@ -1439,9 +1439,9 @@ define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: bneg_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = xor <2 x i64> %1, %3
@@ -1456,7 +1456,7 @@ define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: bclri_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = xor <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>,
<i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -1473,7 +1473,7 @@ define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: bclri_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = xor <8 x i16> <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>,
<i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -1489,7 +1489,7 @@ define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: bclri_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = xor <4 x i32> <i32 8, i32 8, i32 8, i32 8>,
<i32 -1, i32 -1, i32 -1, i32 -1>
@@ -1505,7 +1505,7 @@ define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: bclri_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = xor <2 x i64> <i64 8, i64 8>,
<i64 -1, i64 -1>
@@ -1521,7 +1521,7 @@ define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: bseti_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = or <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
; CHECK-DAG: bseti.b [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1535,7 +1535,7 @@ define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: bseti_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = or <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
; CHECK-DAG: bseti.h [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1549,7 +1549,7 @@ define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: bseti_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = or <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
; CHECK-DAG: bseti.w [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1563,7 +1563,7 @@ define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: bseti_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = or <2 x i64> %1, <i64 8, i64 8>
; CHECK-DAG: bseti.d [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1577,7 +1577,7 @@ define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: bnegi_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = xor <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
; CHECK-DAG: bnegi.b [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1591,7 +1591,7 @@ define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: bnegi_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = xor <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
; CHECK-DAG: bnegi.h [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1605,7 +1605,7 @@ define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: bnegi_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
; CHECK-DAG: bnegi.w [[R3:\$w[0-9]+]], [[R1]], 3
@@ -1619,7 +1619,7 @@ define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @bnegi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: bnegi_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = xor <2 x i64> %1, <i64 8, i64 8>
; CHECK-DAG: bnegi.d [[R3:\$w[0-9]+]], [[R1]], 3
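The edit applied uniformly across these tests is the explicit-pointee-type load syntax: `load <ty>, <ty>* %ptr` spells out the result type as a separate operand rather than deriving it from the pointer type. A minimal sketch of the before/after forms, using a hypothetical function not taken from this patch:

define <4 x i32> @load_example(<4 x i32>* %p) {
  ; old form (pre-change):  %v = load <4 x i32>* %p
  ; new form (post-change): result type written out before the pointer operand
  %v = load <4 x i32>, <4 x i32>* %p
  ret <4 x i32> %v
}

The CHECK lines are unaffected, since only the IR input changes; the generated MSA ld.b/ld.h/ld.w/ld.d instructions remain the same.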
diff --git a/test/CodeGen/Mips/msa/compare.ll b/test/CodeGen/Mips/msa/compare.ll
index 87ca148..bc4f6e7 100644
--- a/test/CodeGen/Mips/msa/compare.ll
+++ b/test/CodeGen/Mips/msa/compare.ll
@@ -4,9 +4,9 @@
define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: ceq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -21,9 +21,9 @@ define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: ceq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -38,9 +38,9 @@ define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: ceq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -55,9 +55,9 @@ define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: ceq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -72,9 +72,9 @@ define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: cle_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -89,9 +89,9 @@ define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: cle_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -106,9 +106,9 @@ define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: cle_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -123,9 +123,9 @@ define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: cle_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -140,9 +140,9 @@ define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: cle_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -157,9 +157,9 @@ define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: cle_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -174,9 +174,9 @@ define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: cle_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -191,9 +191,9 @@ define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: cle_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -208,9 +208,9 @@ define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: clt_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -225,9 +225,9 @@ define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: clt_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -242,9 +242,9 @@ define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: clt_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -259,9 +259,9 @@ define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: clt_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -276,9 +276,9 @@ define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: clt_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -293,9 +293,9 @@ define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: clt_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -310,9 +310,9 @@ define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: clt_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -327,9 +327,9 @@ define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: clt_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -345,9 +345,9 @@ define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; issues in this area.
define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: cne_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
@@ -365,9 +365,9 @@ define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: cne_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
@@ -387,9 +387,9 @@ define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: cne_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -409,9 +409,9 @@ define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: cne_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -429,7 +429,7 @@ define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: ceqi_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -444,7 +444,7 @@ define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: ceqi_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -459,7 +459,7 @@ define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: ceqi_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -474,7 +474,7 @@ define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: ceqi_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -489,7 +489,7 @@ define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: clei_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -504,7 +504,7 @@ define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: clei_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -519,7 +519,7 @@ define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: clei_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -534,7 +534,7 @@ define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: clei_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -549,7 +549,7 @@ define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: clei_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -564,7 +564,7 @@ define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: clei_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -579,7 +579,7 @@ define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: clei_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -594,7 +594,7 @@ define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: clei_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -609,7 +609,7 @@ define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: clti_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -624,7 +624,7 @@ define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: clti_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -639,7 +639,7 @@ define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: clti_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -654,7 +654,7 @@ define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: clti_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -669,7 +669,7 @@ define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: clti_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
@@ -684,7 +684,7 @@ define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: clti_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
@@ -699,7 +699,7 @@ define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: clti_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -714,7 +714,7 @@ define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @clti_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: clti_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -730,11 +730,11 @@ define void @bsel_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
<16 x i8>* %c) nounwind {
; CHECK: bsel_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %c
+ %3 = load <16 x i8>, <16 x i8>* %c
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <16 x i8> %1, %2
; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -752,11 +752,11 @@ define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
<8 x i16>* %c) nounwind {
; CHECK: bsel_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>* %c
+ %3 = load <8 x i16>, <8 x i16>* %c
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <8 x i16> %1, %2
; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -774,11 +774,11 @@ define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
<4 x i32>* %c) nounwind {
; CHECK: bsel_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>* %c
+ %3 = load <4 x i32>, <4 x i32>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <4 x i32> %1, %2
; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -796,11 +796,11 @@ define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
<2 x i64>* %c) nounwind {
; CHECK: bsel_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>* %c
+ %3 = load <2 x i64>, <2 x i64>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <2 x i64> %1, %2
; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -818,11 +818,11 @@ define void @bsel_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
<16 x i8>* %c) nounwind {
; CHECK: bsel_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %c
+ %3 = load <16 x i8>, <16 x i8>* %c
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <16 x i8> %1, %2
; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -840,11 +840,11 @@ define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
<8 x i16>* %c) nounwind {
; CHECK: bsel_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>* %c
+ %3 = load <8 x i16>, <8 x i16>* %c
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <8 x i16> %1, %2
; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -862,11 +862,11 @@ define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
<4 x i32>* %c) nounwind {
; CHECK: bsel_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>* %c
+ %3 = load <4 x i32>, <4 x i32>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <4 x i32> %1, %2
; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -884,11 +884,11 @@ define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
<2 x i64>* %c) nounwind {
; CHECK: bsel_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>* %c
+ %3 = load <2 x i64>, <2 x i64>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <2 x i64> %1, %2
; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -906,9 +906,9 @@ define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
<16 x i8>* %c) nounwind {
; CHECK: bseli_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <16 x i8> %1, %2
; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -925,9 +925,9 @@ define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
<8 x i16>* %c) nounwind {
; CHECK: bseli_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <8 x i16> %1, %2
; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -945,9 +945,9 @@ define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
<4 x i32>* %c) nounwind {
; CHECK: bseli_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <4 x i32> %1, %2
; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -965,9 +965,9 @@ define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
<2 x i64>* %c) nounwind {
; CHECK: bseli_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <2 x i64> %1, %2
; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -985,9 +985,9 @@ define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
<16 x i8>* %c) nounwind {
; CHECK: bseli_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <16 x i8> %1, %2
; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -1004,9 +1004,9 @@ define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
<8 x i16>* %c) nounwind {
; CHECK: bseli_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <8 x i16> %1, %2
; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -1024,9 +1024,9 @@ define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
<4 x i32>* %c) nounwind {
; CHECK: bseli_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <4 x i32> %1, %2
; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -1044,9 +1044,9 @@ define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
<2 x i64>* %c) nounwind {
; CHECK: bseli_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <2 x i64> %1, %2
; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -1063,9 +1063,9 @@ define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: max_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1080,9 +1080,9 @@ define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: max_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1097,9 +1097,9 @@ define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: max_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1114,9 +1114,9 @@ define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: max_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1131,9 +1131,9 @@ define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: max_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1148,9 +1148,9 @@ define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: max_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1165,9 +1165,9 @@ define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: max_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1182,9 +1182,9 @@ define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: max_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1199,9 +1199,9 @@ define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: max_s_eq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sge <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1216,9 +1216,9 @@ define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin
define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: max_s_eq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sge <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1233,9 +1233,9 @@ define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin
define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: max_s_eq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sge <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1250,9 +1250,9 @@ define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin
define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: max_s_eq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sge <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1267,9 +1267,9 @@ define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin
define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: max_u_eq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp uge <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1284,9 +1284,9 @@ define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin
define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: max_u_eq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp uge <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1301,9 +1301,9 @@ define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin
define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: max_u_eq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp uge <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1318,9 +1318,9 @@ define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin
define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: max_u_eq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp uge <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1335,7 +1335,7 @@ define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin
define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: maxi_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sgt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -1350,7 +1350,7 @@ define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: maxi_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sgt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1365,7 +1365,7 @@ define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: maxi_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sgt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -1380,7 +1380,7 @@ define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: maxi_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sgt <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
@@ -1395,7 +1395,7 @@ define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: maxi_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ugt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -1410,7 +1410,7 @@ define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: maxi_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ugt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1425,7 +1425,7 @@ define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: maxi_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ugt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -1440,7 +1440,7 @@ define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: maxi_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ugt <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
@@ -1455,7 +1455,7 @@ define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: maxi_s_eq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -1470,7 +1470,7 @@ define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: maxi_s_eq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1485,7 +1485,7 @@ define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: maxi_s_eq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -1500,7 +1500,7 @@ define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: maxi_s_eq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sge <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
@@ -1515,7 +1515,7 @@ define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: maxi_u_eq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp uge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -1530,7 +1530,7 @@ define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: maxi_u_eq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp uge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1545,7 +1545,7 @@ define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: maxi_u_eq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp uge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -1560,7 +1560,7 @@ define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: maxi_u_eq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp uge <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
@@ -1575,9 +1575,9 @@ define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: min_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1592,9 +1592,9 @@ define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: min_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1609,9 +1609,9 @@ define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: min_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1626,9 +1626,9 @@ define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: min_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1643,9 +1643,9 @@ define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: min_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1660,9 +1660,9 @@ define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: min_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1677,9 +1677,9 @@ define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: min_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1694,9 +1694,9 @@ define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: min_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1711,9 +1711,9 @@ define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: min_s_eq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1728,9 +1728,9 @@ define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin
define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: min_s_eq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1745,9 +1745,9 @@ define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin
define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: min_s_eq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1762,9 +1762,9 @@ define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin
define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: min_s_eq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1779,9 +1779,9 @@ define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin
define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: min_u_eq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1796,9 +1796,9 @@ define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin
define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: min_u_eq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1813,9 +1813,9 @@ define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin
define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: min_u_eq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1830,9 +1830,9 @@ define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin
define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: min_u_eq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1847,7 +1847,7 @@ define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin
define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: mini_s_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -1862,7 +1862,7 @@ define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: mini_s_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1877,7 +1877,7 @@ define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: mini_s_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -1892,7 +1892,7 @@ define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: mini_s_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
@@ -1907,7 +1907,7 @@ define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: mini_u_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -1922,7 +1922,7 @@ define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: mini_u_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1937,7 +1937,7 @@ define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: mini_u_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -1952,7 +1952,7 @@ define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: mini_u_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
@@ -1967,7 +1967,7 @@ define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: mini_s_eq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -1982,7 +1982,7 @@ define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: mini_s_eq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1997,7 +1997,7 @@ define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: mini_s_eq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -2012,7 +2012,7 @@ define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: mini_s_eq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
@@ -2027,7 +2027,7 @@ define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: mini_u_eq_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -2042,7 +2042,7 @@ define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: mini_u_eq_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -2057,7 +2057,7 @@ define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: mini_u_eq_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -2072,7 +2072,7 @@ define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @mini_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: mini_u_eq_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
diff --git a/test/CodeGen/Mips/msa/compare_float.ll b/test/CodeGen/Mips/msa/compare_float.ll
index e93221b..3229d02 100644
--- a/test/CodeGen/Mips/msa/compare_float.ll
+++ b/test/CodeGen/Mips/msa/compare_float.ll
@@ -9,8 +9,8 @@ declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: false_v4f32:
- %1 = load <4 x float>* %a
- %2 = load <4 x float>* %b
+ %1 = load <4 x float>, <4 x float>* %a
+ %2 = load <4 x float>, <4 x float>* %b
%3 = fcmp false <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
store <4 x i32> %4, <4 x i32>* %c
@@ -25,8 +25,8 @@ define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: false_v2f64:
- %1 = load <2 x double>* %a
- %2 = load <2 x double>* %b
+ %1 = load <2 x double>, <2 x double>* %a
+ %2 = load <2 x double>, <2 x double>* %b
%3 = fcmp false <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
store <2 x i64> %4, <2 x i64>* %c
@@ -41,9 +41,9 @@ define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) noun
define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: oeq_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp oeq <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -58,9 +58,9 @@ define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: oeq_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp oeq <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -75,9 +75,9 @@ define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: oge_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp oge <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -92,9 +92,9 @@ define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: oge_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp oge <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -109,9 +109,9 @@ define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: ogt_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ogt <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -126,9 +126,9 @@ define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: ogt_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ogt <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -143,9 +143,9 @@ define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: ole_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ole <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -160,9 +160,9 @@ define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: ole_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ole <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -177,9 +177,9 @@ define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: olt_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp olt <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -194,9 +194,9 @@ define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: olt_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp olt <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -211,9 +211,9 @@ define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: one_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp one <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -228,9 +228,9 @@ define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: one_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp one <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -245,9 +245,9 @@ define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: ord_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ord <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -262,9 +262,9 @@ define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: ord_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ord <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -279,9 +279,9 @@ define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: ueq_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ueq <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -296,9 +296,9 @@ define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: ueq_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ueq <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -313,9 +313,9 @@ define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: uge_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp uge <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -330,9 +330,9 @@ define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: uge_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp uge <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -347,9 +347,9 @@ define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: ugt_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ugt <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -364,9 +364,9 @@ define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: ugt_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ugt <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -381,9 +381,9 @@ define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: ule_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ule <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -398,9 +398,9 @@ define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: ule_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ule <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -415,9 +415,9 @@ define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: ult_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ult <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -432,9 +432,9 @@ define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: ult_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ult <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -449,9 +449,9 @@ define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: uno_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp uno <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
@@ -466,9 +466,9 @@ define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: uno_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp uno <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
@@ -483,8 +483,8 @@ define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi
define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: true_v4f32:
- %1 = load <4 x float>* %a
- %2 = load <4 x float>* %b
+ %1 = load <4 x float>, <4 x float>* %a
+ %2 = load <4 x float>, <4 x float>* %b
%3 = fcmp true <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
store <4 x i32> %4, <4 x i32>* %c
@@ -499,8 +499,8 @@ define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwin
define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: true_v2f64:
- %1 = load <2 x double>* %a
- %2 = load <2 x double>* %b
+ %1 = load <2 x double>, <2 x double>* %a
+ %2 = load <2 x double>, <2 x double>* %b
%3 = fcmp true <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
store <2 x i64> %4, <2 x i64>* %c
@@ -516,11 +516,11 @@ define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
<4 x float>* %c) nounwind {
; CHECK: bsel_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x float>* %c
+ %3 = load <4 x float>, <4 x float>* %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = fcmp ogt <4 x float> %1, %2
; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -538,11 +538,11 @@ define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
<2 x double>* %c) nounwind {
; CHECK: bsel_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x double>* %c
+ %3 = load <2 x double>, <2 x double>* %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = fcmp ogt <2 x double> %1, %2
; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -560,9 +560,9 @@ define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
<4 x float>* %c) nounwind {
; CHECK: bseli_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ogt <4 x float> %1, %2
; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -580,9 +580,9 @@ define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
<2 x double>* %c) nounwind {
; CHECK: bseli_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ogt <2 x double> %1, %2
; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
@@ -599,9 +599,9 @@ define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: max_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2)
; CHECK-DAG: fmax.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -615,9 +615,9 @@ define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: max_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2)
; CHECK-DAG: fmax.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -631,9 +631,9 @@ define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou
define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
; CHECK: min_v4f32:
- %1 = load <4 x float>* %a
+ %1 = load <4 x float>, <4 x float>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>* %b
+ %2 = load <4 x float>, <4 x float>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2)
; CHECK-DAG: fmin.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -647,9 +647,9 @@ define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
define void @min_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
; CHECK: min_v2f64:
- %1 = load <2 x double>* %a
+ %1 = load <2 x double>, <2 x double>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>* %b
+ %2 = load <2 x double>, <2 x double>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2)
; CHECK-DAG: fmin.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
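; Editorial sketch (not part of the patch): every hunk in this change makes
; the same mechanical update, spelling out the result type of each load (and
; the source element type of each getelementptr) ahead of the pointer
; operand. Under that assumption, a minimal post-patch compare test reads:
define void @sketch_oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
  %1 = load <4 x float>, <4 x float>* %a
  %2 = load <4 x float>, <4 x float>* %b
  %3 = fcmp oeq <4 x float> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  store <4 x i32> %4, <4 x i32>* %c
  ret void
}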
diff --git a/test/CodeGen/Mips/msa/elm_copy.ll b/test/CodeGen/Mips/msa/elm_copy.ll
index 0dd75fa..2a0d74f 100644
--- a/test/CodeGen/Mips/msa/elm_copy.ll
+++ b/test/CodeGen/Mips/msa/elm_copy.ll
@@ -15,7 +15,7 @@
define void @llvm_mips_copy_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_copy_s_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_s_b_ARG1
%1 = tail call i32 @llvm.mips.copy.s.b(<16 x i8> %0, i32 1)
store i32 %1, i32* @llvm_mips_copy_s_b_RES
ret void
@@ -38,7 +38,7 @@ declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
define void @llvm_mips_copy_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_copy_s_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_s_h_ARG1
%1 = tail call i32 @llvm.mips.copy.s.h(<8 x i16> %0, i32 1)
store i32 %1, i32* @llvm_mips_copy_s_h_RES
ret void
@@ -61,7 +61,7 @@ declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
define void @llvm_mips_copy_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_copy_s_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_s_w_ARG1
%1 = tail call i32 @llvm.mips.copy.s.w(<4 x i32> %0, i32 1)
store i32 %1, i32* @llvm_mips_copy_s_w_RES
ret void
@@ -84,7 +84,7 @@ declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
define void @llvm_mips_copy_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_copy_s_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_s_d_ARG1
%1 = tail call i64 @llvm.mips.copy.s.d(<2 x i64> %0, i32 1)
store i64 %1, i64* @llvm_mips_copy_s_d_RES
ret void
@@ -112,7 +112,7 @@ declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind
define void @llvm_mips_copy_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_copy_u_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_u_b_ARG1
%1 = tail call i32 @llvm.mips.copy.u.b(<16 x i8> %0, i32 1)
store i32 %1, i32* @llvm_mips_copy_u_b_RES
ret void
@@ -135,7 +135,7 @@ declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind
define void @llvm_mips_copy_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_copy_u_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_u_h_ARG1
%1 = tail call i32 @llvm.mips.copy.u.h(<8 x i16> %0, i32 1)
store i32 %1, i32* @llvm_mips_copy_u_h_RES
ret void
@@ -158,7 +158,7 @@ declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32) nounwind
define void @llvm_mips_copy_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_copy_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_u_w_ARG1
%1 = tail call i32 @llvm.mips.copy.u.w(<4 x i32> %0, i32 1)
store i32 %1, i32* @llvm_mips_copy_u_w_RES
ret void
@@ -181,7 +181,7 @@ declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind
define void @llvm_mips_copy_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_copy_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_u_d_ARG1
%1 = tail call i64 @llvm.mips.copy.u.d(<2 x i64> %0, i32 1)
store i64 %1, i64* @llvm_mips_copy_u_d_RES
ret void
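; Editorial sketch (not part of the patch): copy_s.* sign-extends the selected
; lane into a GPR while copy_u.* zero-extends it, which is why both flavours
; are covered above; with the explicit load type the shared pattern is:
define i32 @sketch_copy_u_b(<16 x i8>* %p) nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* %p
  %1 = tail call i32 @llvm.mips.copy.u.b(<16 x i8> %0, i32 1)
  ret i32 %1
}
declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind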
diff --git a/test/CodeGen/Mips/msa/elm_insv.ll b/test/CodeGen/Mips/msa/elm_insv.ll
index c746e52..46e6289 100644
--- a/test/CodeGen/Mips/msa/elm_insv.ll
+++ b/test/CodeGen/Mips/msa/elm_insv.ll
@@ -16,8 +16,8 @@
define void @llvm_mips_insert_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_insert_b_ARG1
- %1 = load i32* @llvm_mips_insert_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insert_b_ARG1
+ %1 = load i32, i32* @llvm_mips_insert_b_ARG3
%2 = tail call <16 x i8> @llvm.mips.insert.b(<16 x i8> %0, i32 1, i32 %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_insert_b_RES
ret void
@@ -38,8 +38,8 @@ declare <16 x i8> @llvm.mips.insert.b(<16 x i8>, i32, i32) nounwind
define void @llvm_mips_insert_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_insert_h_ARG1
- %1 = load i32* @llvm_mips_insert_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insert_h_ARG1
+ %1 = load i32, i32* @llvm_mips_insert_h_ARG3
%2 = tail call <8 x i16> @llvm.mips.insert.h(<8 x i16> %0, i32 1, i32 %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_insert_h_RES
ret void
@@ -60,8 +60,8 @@ declare <8 x i16> @llvm.mips.insert.h(<8 x i16>, i32, i32) nounwind
define void @llvm_mips_insert_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_insert_w_ARG1
- %1 = load i32* @llvm_mips_insert_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insert_w_ARG1
+ %1 = load i32, i32* @llvm_mips_insert_w_ARG3
%2 = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %0, i32 1, i32 %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_insert_w_RES
ret void
@@ -82,8 +82,8 @@ declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind
define void @llvm_mips_insert_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_insert_d_ARG1
- %1 = load i64* @llvm_mips_insert_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insert_d_ARG1
+ %1 = load i64, i64* @llvm_mips_insert_d_ARG3
%2 = tail call <2 x i64> @llvm.mips.insert.d(<2 x i64> %0, i32 1, i64 %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_insert_d_RES
ret void
@@ -110,8 +110,8 @@ declare <2 x i64> @llvm.mips.insert.d(<2 x i64>, i32, i64) nounwind
define void @llvm_mips_insve_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_insve_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_insve_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG3
%2 = tail call <16 x i8> @llvm.mips.insve.b(<16 x i8> %0, i32 1, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_insve_b_RES
ret void
@@ -136,8 +136,8 @@ declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind
define void @llvm_mips_insve_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_insve_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_insve_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG3
%2 = tail call <8 x i16> @llvm.mips.insve.h(<8 x i16> %0, i32 1, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_insve_h_RES
ret void
@@ -162,8 +162,8 @@ declare <8 x i16> @llvm.mips.insve.h(<8 x i16>, i32, <8 x i16>) nounwind
define void @llvm_mips_insve_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_insve_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_insve_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG3
%2 = tail call <4 x i32> @llvm.mips.insve.w(<4 x i32> %0, i32 1, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_insve_w_RES
ret void
@@ -188,8 +188,8 @@ declare <4 x i32> @llvm.mips.insve.w(<4 x i32>, i32, <4 x i32>) nounwind
define void @llvm_mips_insve_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_insve_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_insve_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG3
%2 = tail call <2 x i64> @llvm.mips.insve.d(<2 x i64> %0, i32 1, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_insve_d_RES
ret void
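; Editorial sketch (not part of the patch): insert.* takes the replacement
; value from a GPR, whereas insve.* takes it from lane 0 of a second vector
; (hence ARG3 being a vector above, under the MSA semantics assumed here):
define <16 x i8> @sketch_insve_b(<16 x i8> %wd, <16 x i8> %ws) nounwind {
entry:
  %0 = tail call <16 x i8> @llvm.mips.insve.b(<16 x i8> %wd, i32 1, <16 x i8> %ws)
  ret <16 x i8> %0
}
declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind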
diff --git a/test/CodeGen/Mips/msa/elm_move.ll b/test/CodeGen/Mips/msa/elm_move.ll
index 98c06c7..9665b6d 100644
--- a/test/CodeGen/Mips/msa/elm_move.ll
+++ b/test/CodeGen/Mips/msa/elm_move.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_move_vb_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_move_vb_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_move_vb_ARG1
%1 = tail call <16 x i8> @llvm.mips.move.v(<16 x i8> %0)
store <16 x i8> %1, <16 x i8>* @llvm_mips_move_vb_RES
ret void
diff --git a/test/CodeGen/Mips/msa/elm_shift_slide.ll b/test/CodeGen/Mips/msa/elm_shift_slide.ll
index 00a6544..87f15f1 100644
--- a/test/CodeGen/Mips/msa/elm_shift_slide.ll
+++ b/test/CodeGen/Mips/msa/elm_shift_slide.ll
@@ -10,8 +10,8 @@
define void @llvm_mips_sldi_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_sldi_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_sldi_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, <16 x i8> %1, i32 1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_sldi_b_RES
ret void
@@ -31,8 +31,8 @@ declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32) nounwind
define void @llvm_mips_sldi_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_sldi_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_sldi_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, <8 x i16> %1, i32 1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_sldi_h_RES
ret void
@@ -52,8 +52,8 @@ declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32) nounwind
define void @llvm_mips_sldi_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_sldi_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_sldi_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, <4 x i32> %1, i32 1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_sldi_w_RES
ret void
@@ -73,8 +73,8 @@ declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32) nounwind
define void @llvm_mips_sldi_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_sldi_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_sldi_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, <2 x i64> %1, i32 1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_sldi_d_RES
ret void
@@ -93,7 +93,7 @@ declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32) nounwind
define void @llvm_mips_splati_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_splati_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splati_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.splati.b(<16 x i8> %0, i32 1)
store <16 x i8> %1, <16 x i8>* @llvm_mips_splati_b_RES
ret void
@@ -112,7 +112,7 @@ declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32) nounwind
define void @llvm_mips_splati_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_splati_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splati_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.splati.h(<8 x i16> %0, i32 1)
store <8 x i16> %1, <8 x i16>* @llvm_mips_splati_h_RES
ret void
@@ -131,7 +131,7 @@ declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32) nounwind
define void @llvm_mips_splati_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_splati_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splati_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1)
store <4 x i32> %1, <4 x i32>* @llvm_mips_splati_w_RES
ret void
@@ -150,7 +150,7 @@ declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32) nounwind
define void @llvm_mips_splati_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_splati_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splati_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.splati.d(<2 x i64> %0, i32 1)
store <2 x i64> %1, <2 x i64>* @llvm_mips_splati_d_RES
ret void
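; Editorial sketch (not part of the patch): splati.* replicates the lane
; selected by the immediate across the whole result, so after this change a
; splat of lane 1 is written:
define <4 x i32> @sketch_splati_w(<4 x i32>* %p) nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* %p
  %1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1)
  ret <4 x i32> %1
}
declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32) nounwind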
diff --git a/test/CodeGen/Mips/msa/frameindex.ll b/test/CodeGen/Mips/msa/frameindex.ll
index ebec465..afd28ae 100644
--- a/test/CodeGen/Mips/msa/frameindex.ll
+++ b/test/CodeGen/Mips/msa/frameindex.ll
@@ -5,7 +5,7 @@ define void @loadstore_v16i8_near() nounwind {
; MIPS32-AE: loadstore_v16i8_near:
%1 = alloca <16 x i8>
- %2 = load volatile <16 x i8>* %1
+ %2 = load volatile <16 x i8>, <16 x i8>* %1
; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0($sp)
store volatile <16 x i8> %2, <16 x i8>* %1
; MIPS32-AE: st.b [[R1]], 0($sp)
@@ -20,7 +20,7 @@ define void @loadstore_v16i8_just_under_simm10() nounwind {
%1 = alloca <16 x i8>
%2 = alloca [496 x i8] ; Push the frame right up to 512 bytes
- %3 = load volatile <16 x i8>* %1
+ %3 = load volatile <16 x i8>, <16 x i8>* %1
; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 496($sp)
store volatile <16 x i8> %3, <16 x i8>* %1
; MIPS32-AE: st.b [[R1]], 496($sp)
@@ -35,7 +35,7 @@ define void @loadstore_v16i8_just_over_simm10() nounwind {
%1 = alloca <16 x i8>
%2 = alloca [497 x i8] ; Push the frame just over 512 bytes
- %3 = load volatile <16 x i8>* %1
+ %3 = load volatile <16 x i8>, <16 x i8>* %1
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
store volatile <16 x i8> %3, <16 x i8>* %1
@@ -52,7 +52,7 @@ define void @loadstore_v16i8_just_under_simm16() nounwind {
%1 = alloca <16 x i8>
%2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
- %3 = load volatile <16 x i8>* %1
+ %3 = load volatile <16 x i8>, <16 x i8>* %1
; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
@@ -71,7 +71,7 @@ define void @loadstore_v16i8_just_over_simm16() nounwind {
%1 = alloca <16 x i8>
%2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
- %3 = load volatile <16 x i8>* %1
+ %3 = load volatile <16 x i8>, <16 x i8>* %1
; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
@@ -88,7 +88,7 @@ define void @loadstore_v8i16_near() nounwind {
; MIPS32-AE: loadstore_v8i16_near:
%1 = alloca <8 x i16>
- %2 = load volatile <8 x i16>* %1
+ %2 = load volatile <8 x i16>, <8 x i16>* %1
; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0($sp)
store volatile <8 x i16> %2, <8 x i16>* %1
; MIPS32-AE: st.h [[R1]], 0($sp)
@@ -102,11 +102,11 @@ define void @loadstore_v8i16_unaligned() nounwind {
%1 = alloca [2 x <8 x i16>]
%2 = bitcast [2 x <8 x i16>]* %1 to i8*
- %3 = getelementptr i8* %2, i32 1
+ %3 = getelementptr i8, i8* %2, i32 1
%4 = bitcast i8* %3 to [2 x <8 x i16>]*
- %5 = getelementptr [2 x <8 x i16>]* %4, i32 0, i32 0
+ %5 = getelementptr [2 x <8 x i16>], [2 x <8 x i16>]* %4, i32 0, i32 0
- %6 = load volatile <8 x i16>* %5
+ %6 = load volatile <8 x i16>, <8 x i16>* %5
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
store volatile <8 x i16> %6, <8 x i16>* %5
@@ -123,7 +123,7 @@ define void @loadstore_v8i16_just_under_simm10() nounwind {
%1 = alloca <8 x i16>
%2 = alloca [1008 x i8] ; Push the frame right up to 1024 bytes
- %3 = load volatile <8 x i16>* %1
+ %3 = load volatile <8 x i16>, <8 x i16>* %1
; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 1008($sp)
store volatile <8 x i16> %3, <8 x i16>* %1
; MIPS32-AE: st.h [[R1]], 1008($sp)
@@ -138,7 +138,7 @@ define void @loadstore_v8i16_just_over_simm10() nounwind {
%1 = alloca <8 x i16>
%2 = alloca [1009 x i8] ; Push the frame just over 1024 bytes
- %3 = load volatile <8 x i16>* %1
+ %3 = load volatile <8 x i16>, <8 x i16>* %1
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
store volatile <8 x i16> %3, <8 x i16>* %1
@@ -155,7 +155,7 @@ define void @loadstore_v8i16_just_under_simm16() nounwind {
%1 = alloca <8 x i16>
%2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
- %3 = load volatile <8 x i16>* %1
+ %3 = load volatile <8 x i16>, <8 x i16>* %1
; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
@@ -174,7 +174,7 @@ define void @loadstore_v8i16_just_over_simm16() nounwind {
%1 = alloca <8 x i16>
%2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
- %3 = load volatile <8 x i16>* %1
+ %3 = load volatile <8 x i16>, <8 x i16>* %1
; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
@@ -191,7 +191,7 @@ define void @loadstore_v4i32_near() nounwind {
; MIPS32-AE: loadstore_v4i32_near:
%1 = alloca <4 x i32>
- %2 = load volatile <4 x i32>* %1
+ %2 = load volatile <4 x i32>, <4 x i32>* %1
; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0($sp)
store volatile <4 x i32> %2, <4 x i32>* %1
; MIPS32-AE: st.w [[R1]], 0($sp)
@@ -205,11 +205,11 @@ define void @loadstore_v4i32_unaligned() nounwind {
%1 = alloca [2 x <4 x i32>]
%2 = bitcast [2 x <4 x i32>]* %1 to i8*
- %3 = getelementptr i8* %2, i32 1
+ %3 = getelementptr i8, i8* %2, i32 1
%4 = bitcast i8* %3 to [2 x <4 x i32>]*
- %5 = getelementptr [2 x <4 x i32>]* %4, i32 0, i32 0
+ %5 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %4, i32 0, i32 0
- %6 = load volatile <4 x i32>* %5
+ %6 = load volatile <4 x i32>, <4 x i32>* %5
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
store volatile <4 x i32> %6, <4 x i32>* %5
@@ -226,7 +226,7 @@ define void @loadstore_v4i32_just_under_simm10() nounwind {
%1 = alloca <4 x i32>
%2 = alloca [2032 x i8] ; Push the frame right up to 2048 bytes
- %3 = load volatile <4 x i32>* %1
+ %3 = load volatile <4 x i32>, <4 x i32>* %1
; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 2032($sp)
store volatile <4 x i32> %3, <4 x i32>* %1
; MIPS32-AE: st.w [[R1]], 2032($sp)
@@ -241,7 +241,7 @@ define void @loadstore_v4i32_just_over_simm10() nounwind {
%1 = alloca <4 x i32>
%2 = alloca [2033 x i8] ; Push the frame just over 2048 bytes
- %3 = load volatile <4 x i32>* %1
+ %3 = load volatile <4 x i32>, <4 x i32>* %1
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
store volatile <4 x i32> %3, <4 x i32>* %1
@@ -258,7 +258,7 @@ define void @loadstore_v4i32_just_under_simm16() nounwind {
%1 = alloca <4 x i32>
%2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
- %3 = load volatile <4 x i32>* %1
+ %3 = load volatile <4 x i32>, <4 x i32>* %1
; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
@@ -277,7 +277,7 @@ define void @loadstore_v4i32_just_over_simm16() nounwind {
%1 = alloca <4 x i32>
%2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
- %3 = load volatile <4 x i32>* %1
+ %3 = load volatile <4 x i32>, <4 x i32>* %1
; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
@@ -294,7 +294,7 @@ define void @loadstore_v2i64_near() nounwind {
; MIPS32-AE: loadstore_v2i64_near:
%1 = alloca <2 x i64>
- %2 = load volatile <2 x i64>* %1
+ %2 = load volatile <2 x i64>, <2 x i64>* %1
; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0($sp)
store volatile <2 x i64> %2, <2 x i64>* %1
; MIPS32-AE: st.d [[R1]], 0($sp)
@@ -308,11 +308,11 @@ define void @loadstore_v2i64_unaligned() nounwind {
%1 = alloca [2 x <2 x i64>]
%2 = bitcast [2 x <2 x i64>]* %1 to i8*
- %3 = getelementptr i8* %2, i32 1
+ %3 = getelementptr i8, i8* %2, i32 1
%4 = bitcast i8* %3 to [2 x <2 x i64>]*
- %5 = getelementptr [2 x <2 x i64>]* %4, i32 0, i32 0
+ %5 = getelementptr [2 x <2 x i64>], [2 x <2 x i64>]* %4, i32 0, i32 0
- %6 = load volatile <2 x i64>* %5
+ %6 = load volatile <2 x i64>, <2 x i64>* %5
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
store volatile <2 x i64> %6, <2 x i64>* %5
@@ -329,7 +329,7 @@ define void @loadstore_v2i64_just_under_simm10() nounwind {
%1 = alloca <2 x i64>
%2 = alloca [4080 x i8] ; Push the frame right up to 4096 bytes
- %3 = load volatile <2 x i64>* %1
+ %3 = load volatile <2 x i64>, <2 x i64>* %1
; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 4080($sp)
store volatile <2 x i64> %3, <2 x i64>* %1
; MIPS32-AE: st.d [[R1]], 4080($sp)
@@ -344,7 +344,7 @@ define void @loadstore_v2i64_just_over_simm10() nounwind {
%1 = alloca <2 x i64>
%2 = alloca [4081 x i8] ; Push the frame just over 4096 bytes
- %3 = load volatile <2 x i64>* %1
+ %3 = load volatile <2 x i64>, <2 x i64>* %1
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
store volatile <2 x i64> %3, <2 x i64>* %1
@@ -361,7 +361,7 @@ define void @loadstore_v2i64_just_under_simm16() nounwind {
%1 = alloca <2 x i64>
%2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
- %3 = load volatile <2 x i64>* %1
+ %3 = load volatile <2 x i64>, <2 x i64>* %1
; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
@@ -380,7 +380,7 @@ define void @loadstore_v2i64_just_over_simm16() nounwind {
%1 = alloca <2 x i64>
%2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
- %3 = load volatile <2 x i64>* %1
+ %3 = load volatile <2 x i64>, <2 x i64>* %1
; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
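; Editorial note (inferred from the CHECK lines above, not part of the patch):
; the MSA ld/st offset is a signed 10-bit immediate scaled by the element
; size, so the directly addressable window is 512 bytes for .b, 1024 for .h,
; 2048 for .w and 4096 for .d; one byte past that, the tests expect the base
; to be materialized first (addiu, or ori+addu beyond the 16-bit range), e.g.:
define void @sketch_just_over_simm10() nounwind {
  %1 = alloca <4 x i32>
  %2 = alloca [2033 x i8]              ; pushes %1 just past the 2048B window
  %3 = load volatile <4 x i32>, <4 x i32>* %1
  store volatile <4 x i32> %3, <4 x i32>* %1
  ret void
}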
diff --git a/test/CodeGen/Mips/msa/i10.ll b/test/CodeGen/Mips/msa/i10.ll
index c5a9617..204884b 100644
--- a/test/CodeGen/Mips/msa/i10.ll
+++ b/test/CodeGen/Mips/msa/i10.ll
@@ -7,7 +7,7 @@
define i32 @llvm_mips_bnz_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bnz_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_b_ARG1
%1 = tail call i32 @llvm.mips.bnz.b(<16 x i8> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
@@ -28,7 +28,7 @@ declare i32 @llvm.mips.bnz.b(<16 x i8>) nounwind
define i32 @llvm_mips_bnz_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bnz_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnz_h_ARG1
%1 = tail call i32 @llvm.mips.bnz.h(<8 x i16> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
@@ -49,7 +49,7 @@ declare i32 @llvm.mips.bnz.h(<8 x i16>) nounwind
define i32 @llvm_mips_bnz_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bnz_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnz_w_ARG1
%1 = tail call i32 @llvm.mips.bnz.w(<4 x i32> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
@@ -70,7 +70,7 @@ declare i32 @llvm.mips.bnz.w(<4 x i32>) nounwind
define i32 @llvm_mips_bnz_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bnz_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnz_d_ARG1
%1 = tail call i32 @llvm.mips.bnz.d(<2 x i64> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
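; Editorial sketch (not part of the patch): the bnz tests branch on the i32
; result of the intrinsic (nonzero when no lane is zero, per the MSA BNZ
; semantics assumed here), which is why each test needs the icmp/br pair:
define i32 @sketch_bnz_w(<4 x i32>* %p) nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* %p
  %1 = tail call i32 @llvm.mips.bnz.w(<4 x i32> %0)
  %2 = icmp eq i32 %1, 0
  br i1 %2, label %false, label %true
true:
  ret i32 1
false:
  ret i32 0
}
declare i32 @llvm.mips.bnz.w(<4 x i32>) nounwind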
diff --git a/test/CodeGen/Mips/msa/i5-a.ll b/test/CodeGen/Mips/msa/i5-a.ll
index 0b50720..f9486b1 100644
--- a/test/CodeGen/Mips/msa/i5-a.ll
+++ b/test/CodeGen/Mips/msa/i5-a.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_addvi_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_addvi_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addvi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_addvi_b_RES
ret void
@@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32) nounwind
define void @llvm_mips_addvi_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_addvi_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addvi_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_addvi_h_RES
ret void
@@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32) nounwind
define void @llvm_mips_addvi_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_addvi_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addvi_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_addvi_w_RES
ret void
@@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32) nounwind
define void @llvm_mips_addvi_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_addvi_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addvi_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_addvi_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/i5-b.ll b/test/CodeGen/Mips/msa/i5-b.ll
index da6be66..40ab095 100644
--- a/test/CodeGen/Mips/msa/i5-b.ll
+++ b/test/CodeGen/Mips/msa/i5-b.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_bclri_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bclri_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclri_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_bclri_b_RES
ret void
@@ -29,7 +29,7 @@ declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32) nounwind
define void @llvm_mips_bclri_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bclri_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclri_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_bclri_h_RES
ret void
@@ -48,7 +48,7 @@ declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32) nounwind
define void @llvm_mips_bclri_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bclri_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclri_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_bclri_w_RES
ret void
@@ -67,7 +67,7 @@ declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32) nounwind
define void @llvm_mips_bclri_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bclri_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclri_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_bclri_d_RES
ret void
@@ -87,8 +87,8 @@ declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32) nounwind
define void @llvm_mips_binsli_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_binsli_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_binsli_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 7)
store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES
ret void
@@ -112,8 +112,8 @@ declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind
define void @llvm_mips_binsli_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_binsli_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_binsli_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 7)
store <8 x i16> %2, <8 x i16>* @llvm_mips_binsli_h_RES
ret void
@@ -137,8 +137,8 @@ declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32) nounwind
define void @llvm_mips_binsli_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_binsli_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_binsli_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 7)
store <4 x i32> %2, <4 x i32>* @llvm_mips_binsli_w_RES
ret void
@@ -162,8 +162,8 @@ declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32) nounwind
define void @llvm_mips_binsli_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_binsli_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_binsli_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG2
; TODO: We use a particularly wide mask here to work around a legalization
; issue. If the mask doesn't fit within a 10-bit immediate, it gets
; legalized into a constant pool. We should add a test to cover the
@@ -191,8 +191,8 @@ declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32) nounwind
define void @llvm_mips_binsri_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_binsri_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_binsri_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 7)
store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES
ret void
@@ -216,8 +216,8 @@ declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32) nounwind
define void @llvm_mips_binsri_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_binsri_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_binsri_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 7)
store <8 x i16> %2, <8 x i16>* @llvm_mips_binsri_h_RES
ret void
@@ -241,8 +241,8 @@ declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32) nounwind
define void @llvm_mips_binsri_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_binsri_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_binsri_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 7)
store <4 x i32> %2, <4 x i32>* @llvm_mips_binsri_w_RES
ret void
@@ -266,8 +266,8 @@ declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32) nounwind
define void @llvm_mips_binsri_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_binsri_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_binsri_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 7)
store <2 x i64> %2, <2 x i64>* @llvm_mips_binsri_d_RES
ret void
@@ -290,7 +290,7 @@ declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32) nounwind
define void @llvm_mips_bnegi_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bnegi_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnegi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_bnegi_b_RES
ret void
@@ -309,7 +309,7 @@ declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32) nounwind
define void @llvm_mips_bnegi_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bnegi_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnegi_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_bnegi_h_RES
ret void
@@ -328,7 +328,7 @@ declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32) nounwind
define void @llvm_mips_bnegi_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bnegi_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnegi_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_bnegi_w_RES
ret void
@@ -347,7 +347,7 @@ declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32) nounwind
define void @llvm_mips_bnegi_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bnegi_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnegi_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_bnegi_d_RES
ret void
@@ -366,7 +366,7 @@ declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32) nounwind
define void @llvm_mips_bseti_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bseti_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseti_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %0, i32 7)
store <16 x i8> %1, <16 x i8>* @llvm_mips_bseti_b_RES
ret void
@@ -385,7 +385,7 @@ declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32) nounwind
define void @llvm_mips_bseti_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bseti_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bseti_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %0, i32 7)
store <8 x i16> %1, <8 x i16>* @llvm_mips_bseti_h_RES
ret void
@@ -404,7 +404,7 @@ declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32) nounwind
define void @llvm_mips_bseti_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bseti_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bseti_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %0, i32 7)
store <4 x i32> %1, <4 x i32>* @llvm_mips_bseti_w_RES
ret void
@@ -423,7 +423,7 @@ declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32) nounwind
define void @llvm_mips_bseti_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bseti_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bseti_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %0, i32 7)
store <2 x i64> %1, <2 x i64>* @llvm_mips_bseti_d_RES
ret void
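; Editorial sketch (not part of the patch): in the b(clr|neg|set)i families
; the 5-bit immediate names the affected bit of each lane (bit 7 above, the
; top bit of a byte), while for bins(l|r)i it gives the count of leading or
; trailing bits copied from the second operand, under the encoding assumed
; here; the two-operand binsli form is:
define <16 x i8> @sketch_binsli_b(<16 x i8> %a, <16 x i8> %b) nounwind {
entry:
  %0 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %b, i32 7)
  ret <16 x i8> %0
}
declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind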
diff --git a/test/CodeGen/Mips/msa/i5-c.ll b/test/CodeGen/Mips/msa/i5-c.ll
index bf1578f..8158250 100644
--- a/test/CodeGen/Mips/msa/i5-c.ll
+++ b/test/CodeGen/Mips/msa/i5-c.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_ceqi_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ceqi_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceqi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_ceqi_b_RES
ret void
@@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.ceqi.b(<16 x i8>, i32) nounwind
define void @llvm_mips_ceqi_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_ceqi_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceqi_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_ceqi_h_RES
ret void
@@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.ceqi.h(<8 x i16>, i32) nounwind
define void @llvm_mips_ceqi_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_ceqi_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceqi_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_ceqi_w_RES
ret void
@@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.ceqi.w(<4 x i32>, i32) nounwind
define void @llvm_mips_ceqi_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_ceqi_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceqi_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_ceqi_d_RES
ret void
@@ -85,7 +85,7 @@ declare <2 x i64> @llvm.mips.ceqi.d(<2 x i64>, i32) nounwind
define void @llvm_mips_clei_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_clei_s_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_s_b_RES
ret void
@@ -104,7 +104,7 @@ declare <16 x i8> @llvm.mips.clei.s.b(<16 x i8>, i32) nounwind
define void @llvm_mips_clei_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_clei_s_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_s_h_RES
ret void
@@ -123,7 +123,7 @@ declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32) nounwind
define void @llvm_mips_clei_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_clei_s_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_s_w_RES
ret void
@@ -142,7 +142,7 @@ declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32) nounwind
define void @llvm_mips_clei_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_clei_s_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_s_d_RES
ret void
@@ -161,7 +161,7 @@ declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32) nounwind
define void @llvm_mips_clei_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_clei_u_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_u_b_RES
ret void
@@ -180,7 +180,7 @@ declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32) nounwind
define void @llvm_mips_clei_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_clei_u_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_u_h_RES
ret void
@@ -199,7 +199,7 @@ declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32) nounwind
define void @llvm_mips_clei_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_clei_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_u_w_RES
ret void
@@ -218,7 +218,7 @@ declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32) nounwind
define void @llvm_mips_clei_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_clei_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_u_d_RES
ret void
@@ -237,7 +237,7 @@ declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32) nounwind
define void @llvm_mips_clti_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_clti_s_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_s_b_RES
ret void
@@ -256,7 +256,7 @@ declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32) nounwind
define void @llvm_mips_clti_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_clti_s_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_s_h_RES
ret void
@@ -275,7 +275,7 @@ declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32) nounwind
define void @llvm_mips_clti_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_clti_s_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_s_w_RES
ret void
@@ -294,7 +294,7 @@ declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32) nounwind
define void @llvm_mips_clti_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_clti_s_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_s_d_RES
ret void
@@ -313,7 +313,7 @@ declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32) nounwind
define void @llvm_mips_clti_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_clti_u_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_u_b_RES
ret void
@@ -332,7 +332,7 @@ declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32) nounwind
define void @llvm_mips_clti_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_clti_u_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_u_h_RES
ret void
@@ -351,7 +351,7 @@ declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32) nounwind
define void @llvm_mips_clti_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_clti_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_u_w_RES
ret void
@@ -370,7 +370,7 @@ declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32) nounwind
define void @llvm_mips_clti_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_clti_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_u_d_RES
ret void
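; Editorial sketch (not part of the patch): the ceqi/clei/clti tests all share
; one shape, a 5-bit immediate (signed for the .s forms, unsigned for the .u
; forms, under the encoding assumed here) and an all-ones/all-zeros lane mask
; as the result:
define <8 x i16> @sketch_clti_s_h(<8 x i16>* %p) nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* %p
  %1 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 14)
  ret <8 x i16> %1
}
declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32) nounwind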
diff --git a/test/CodeGen/Mips/msa/i5-m.ll b/test/CodeGen/Mips/msa/i5-m.ll
index 2766349..ba6e9d2 100644
--- a/test/CodeGen/Mips/msa/i5-m.ll
+++ b/test/CodeGen/Mips/msa/i5-m.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_maxi_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_maxi_s_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_s_b_RES
ret void
@@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32) nounwind
define void @llvm_mips_maxi_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_maxi_s_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_s_h_RES
ret void
@@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32) nounwind
define void @llvm_mips_maxi_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_maxi_s_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_s_w_RES
ret void
@@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32) nounwind
define void @llvm_mips_maxi_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_maxi_s_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_s_d_RES
ret void
@@ -85,7 +85,7 @@ declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32) nounwind
define void @llvm_mips_maxi_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_maxi_u_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_u_b_RES
ret void
@@ -104,7 +104,7 @@ declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32) nounwind
define void @llvm_mips_maxi_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_maxi_u_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_u_h_RES
ret void
@@ -123,7 +123,7 @@ declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32) nounwind
define void @llvm_mips_maxi_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_maxi_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_u_w_RES
ret void
@@ -142,7 +142,7 @@ declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32) nounwind
define void @llvm_mips_maxi_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_maxi_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_u_d_RES
ret void
@@ -161,7 +161,7 @@ declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32) nounwind
define void @llvm_mips_mini_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_mini_s_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_s_b_RES
ret void
@@ -180,7 +180,7 @@ declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32) nounwind
define void @llvm_mips_mini_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_mini_s_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_s_h_RES
ret void
@@ -199,7 +199,7 @@ declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32) nounwind
define void @llvm_mips_mini_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_mini_s_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_s_w_RES
ret void
@@ -218,7 +218,7 @@ declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32) nounwind
define void @llvm_mips_mini_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_mini_s_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_s_d_RES
ret void
@@ -237,7 +237,7 @@ declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32) nounwind
define void @llvm_mips_mini_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_mini_u_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_u_b_RES
ret void
@@ -256,7 +256,7 @@ declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32) nounwind
define void @llvm_mips_mini_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_mini_u_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_u_h_RES
ret void
@@ -275,7 +275,7 @@ declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32) nounwind
define void @llvm_mips_mini_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_mini_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_u_w_RES
ret void
@@ -294,7 +294,7 @@ declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32) nounwind
define void @llvm_mips_mini_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_mini_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_u_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/i5-s.ll b/test/CodeGen/Mips/msa/i5-s.ll
index 184172f..db331b1 100644
--- a/test/CodeGen/Mips/msa/i5-s.ll
+++ b/test/CodeGen/Mips/msa/i5-s.ll
@@ -9,7 +9,7 @@
define void @llvm_mips_subvi_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_subvi_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subvi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14)
store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES
ret void
@@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32) nounwind
define void @llvm_mips_subvi_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_subvi_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subvi_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14)
store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES
ret void
@@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32) nounwind
define void @llvm_mips_subvi_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_subvi_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subvi_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14)
store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES
ret void
@@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32) nounwind
define void @llvm_mips_subvi_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_subvi_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subvi_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14)
store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES
ret void
diff --git a/test/CodeGen/Mips/msa/i5_ld_st.ll b/test/CodeGen/Mips/msa/i5_ld_st.ll
index 7cc55f2..991bb84 100644
--- a/test/CodeGen/Mips/msa/i5_ld_st.ll
+++ b/test/CodeGen/Mips/msa/i5_ld_st.ll
@@ -81,7 +81,7 @@ declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind
define void @llvm_mips_st_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_st_b_ARG
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
%1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 16)
ret void
@@ -99,7 +99,7 @@ declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind
define void @llvm_mips_st_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_st_h_ARG
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
%1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 16)
ret void
@@ -117,7 +117,7 @@ declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind
define void @llvm_mips_st_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_st_w_ARG
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
%1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16)
ret void
@@ -135,7 +135,7 @@ declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind
define void @llvm_mips_st_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_st_d_ARG
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
%1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 16)
ret void
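; Editorial sketch (not part of the patch): the st.* intrinsics take a raw
; i8* base plus a byte offset, so each test bitcasts the destination before
; the call; with the explicit load type that is:
define void @sketch_st_w(<4 x i32>* %v, <4 x i32>* %dst) nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* %v
  %1 = bitcast <4 x i32>* %dst to i8*
  tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16)
  ret void
}
declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind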
diff --git a/test/CodeGen/Mips/msa/i8.ll b/test/CodeGen/Mips/msa/i8.ll
index d2931a7..4af9c58 100644
--- a/test/CodeGen/Mips/msa/i8.ll
+++ b/test/CodeGen/Mips/msa/i8.ll
@@ -8,7 +8,7 @@
define void @llvm_mips_andi_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_andi_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_andi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.andi.b(<16 x i8> %0, i32 25)
store <16 x i8> %1, <16 x i8>* @llvm_mips_andi_b_RES
ret void
@@ -28,8 +28,8 @@ declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32) nounwind
define void @llvm_mips_bmnzi_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
ret void
@@ -52,8 +52,8 @@ declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32) nounwind
define void @llvm_mips_bmzi_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bmzi_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bmzi_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
store <16 x i8> %2, <16 x i8>* @llvm_mips_bmzi_b_RES
ret void
@@ -77,8 +77,8 @@ declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32) nounwind
define void @llvm_mips_bseli_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bseli_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bseli_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %0, <16 x i8> %1, i32 25)
store <16 x i8> %2, <16 x i8>* @llvm_mips_bseli_b_RES
ret void
@@ -100,7 +100,7 @@ declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32) nounwind
define void @llvm_mips_nori_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_nori_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nori_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.nori.b(<16 x i8> %0, i32 25)
store <16 x i8> %1, <16 x i8>* @llvm_mips_nori_b_RES
ret void
@@ -119,7 +119,7 @@ declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32) nounwind
define void @llvm_mips_ori_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_ori_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ori_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.ori.b(<16 x i8> %0, i32 25)
store <16 x i8> %1, <16 x i8>* @llvm_mips_ori_b_RES
ret void
@@ -138,7 +138,7 @@ declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32) nounwind
define void @llvm_mips_shf_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_shf_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_shf_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.shf.b(<16 x i8> %0, i32 25)
store <16 x i8> %1, <16 x i8>* @llvm_mips_shf_b_RES
ret void
@@ -157,7 +157,7 @@ declare <16 x i8> @llvm.mips.shf.b(<16 x i8>, i32) nounwind
define void @llvm_mips_shf_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_shf_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_shf_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.shf.h(<8 x i16> %0, i32 25)
store <8 x i16> %1, <8 x i16>* @llvm_mips_shf_h_RES
ret void
@@ -176,7 +176,7 @@ declare <8 x i16> @llvm.mips.shf.h(<8 x i16>, i32) nounwind
define void @llvm_mips_shf_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_shf_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_shf_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.shf.w(<4 x i32> %0, i32 25)
store <4 x i32> %1, <4 x i32>* @llvm_mips_shf_w_RES
ret void
@@ -195,7 +195,7 @@ declare <4 x i32> @llvm.mips.shf.w(<4 x i32>, i32) nounwind
define void @llvm_mips_xori_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_xori_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xori_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.xori.b(<16 x i8> %0, i32 25)
store <16 x i8> %1, <16 x i8>* @llvm_mips_xori_b_RES
ret void
diff --git a/test/CodeGen/Mips/msa/inline-asm.ll b/test/CodeGen/Mips/msa/inline-asm.ll
index 4a34273..85da87b 100644
--- a/test/CodeGen/Mips/msa/inline-asm.ll
+++ b/test/CodeGen/Mips/msa/inline-asm.ll
@@ -16,7 +16,7 @@ entry:
define void @test2() nounwind {
entry:
; CHECK-LABEL: test2:
- %0 = load <4 x i32>* @v4i32_r
+ %0 = load <4 x i32>, <4 x i32>* @v4i32_r
%1 = call <4 x i32> asm "addvi.w ${0:w}, ${1:w}, 1", "=f,f"(<4 x i32> %0)
; CHECK: addvi.w $w{{[1-3]?[0-9]}}, $w{{[1-3]?[0-9]}}, 1
store <4 x i32> %1, <4 x i32>* @v4i32_r
@@ -26,7 +26,7 @@ entry:
define void @test3() nounwind {
entry:
; CHECK-LABEL: test3:
- %0 = load <4 x i32>* @v4i32_r
+ %0 = load <4 x i32>, <4 x i32>* @v4i32_r
%1 = call <4 x i32> asm sideeffect "addvi.w ${0:w}, ${1:w}, 1", "=f,f,~{$w0}"(<4 x i32> %0)
; CHECK: addvi.w $w{{([1-9]|[1-3][0-9])}}, $w{{([1-9]|[1-3][0-9])}}, 1
store <4 x i32> %1, <4 x i32>* @v4i32_r
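; The two inline-asm hunks above exercise the Mips 'f' register constraint
; together with the ':w' print modifier, which names MSA registers ($w0-$w31);
; test3 additionally clobbers $w0 via "~{$w0}", which is why its CHECK pattern
; only accepts $w1 upward. A hedged sketch of the same idiom, standalone rather
; than taken verbatim from this patch:
;   %r = call <4 x i32> asm "addvi.w ${0:w}, ${1:w}, 1", "=f,f"(<4 x i32> %v)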
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll b/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
index 4beaaa9..beb361b 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
@@ -14,7 +14,7 @@ BB:
%A2 = alloca <1 x double>
%A1 = alloca double
%A = alloca i32
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 77, i8* %0
%E = extractelement <8 x i64> zeroinitializer, i32 2
%Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15, i32 1, i32 3>
@@ -24,7 +24,7 @@ BB:
br label %CF
CF: ; preds = %CF, %CF78, %BB
- %L5 = load i8* %Sl
+ %L5 = load i8, i8* %Sl
store i8 %L, i8* %Sl
%E6 = extractelement <8 x i32> zeroinitializer, i32 2
%Shuff7 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 undef>
@@ -33,7 +33,7 @@ CF: ; preds = %CF, %CF78, %BB
%FC = sitofp <8 x i64> zeroinitializer to <8 x float>
%Sl9 = select i1 %Cmp, i8 77, i8 77
%Cmp10 = icmp uge <8 x i64> %Shuff, zeroinitializer
- %L11 = load i8* %0
+ %L11 = load i8, i8* %0
store i8 %Sl9, i8* %0
%E12 = extractelement <1 x i16> zeroinitializer, i32 0
%Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 undef, i32 3, i32 5, i32 7>
@@ -42,7 +42,7 @@ CF: ; preds = %CF, %CF78, %BB
%Tr = trunc <8 x i64> %Shuff to <8 x i32>
%Sl16 = select i1 %Cmp, i8 77, i8 %5
%Cmp17 = icmp ult <8 x i1> %Cmp10, %Cmp10
- %L18 = load i8* %Sl
+ %L18 = load i8, i8* %Sl
store i8 -1, i8* %Sl
%E19 = extractelement <8 x i32> zeroinitializer, i32 3
%Shuff20 = shufflevector <8 x float> %FC, <8 x float> %FC, <8 x i32> <i32 6, i32 8, i32 undef, i32 12, i32 14, i32 0, i32 2, i32 undef>
@@ -54,7 +54,7 @@ CF: ; preds = %CF, %CF78, %BB
br i1 %Cmp25, label %CF, label %CF78
CF78: ; preds = %CF
- %L26 = load i8* %Sl
+ %L26 = load i8, i8* %Sl
store i32 50347, i32* %A
%E27 = extractelement <8 x i1> %Cmp10, i32 2
br i1 %E27, label %CF, label %CF77
@@ -65,7 +65,7 @@ CF77: ; preds = %CF77, %CF81, %CF78
%B30 = urem <8 x i32> %Tr, zeroinitializer
%Tr31 = trunc i32 0 to i16
%Sl32 = select i1 %Cmp, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
- %L33 = load i8* %Sl
+ %L33 = load i8, i8* %Sl
store i8 %L26, i8* %Sl
%E34 = extractelement <4 x i32> zeroinitializer, i32 0
%Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> %B, <1 x i32> undef
@@ -73,7 +73,7 @@ CF77: ; preds = %CF77, %CF81, %CF78
%B37 = srem <1 x i16> %I29, zeroinitializer
%FC38 = sitofp <8 x i32> %B30 to <8 x double>
%Sl39 = select i1 %Cmp, double 0.000000e+00, double %Sl24
- %L40 = load i8* %Sl
+ %L40 = load i8, i8* %Sl
store i8 %Sl16, i8* %Sl
%E41 = extractelement <1 x i16> zeroinitializer, i32 0
%Shuff42 = shufflevector <8 x i1> %Cmp17, <8 x i1> %Cmp10, <8 x i32> <i32 14, i32 undef, i32 2, i32 4, i32 undef, i32 8, i32 10, i32 12>
@@ -85,7 +85,7 @@ CF77: ; preds = %CF77, %CF81, %CF78
br i1 %Cmp46, label %CF77, label %CF80
CF80: ; preds = %CF80, %CF77
- %L47 = load i64* %PC
+ %L47 = load i64, i64* %PC
store i8 77, i8* %Sl
%E48 = extractelement <8 x i64> zeroinitializer, i32 2
%Shuff49 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff7, <8 x i32> <i32 5, i32 7, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 3>
@@ -97,7 +97,7 @@ CF80: ; preds = %CF80, %CF77
br i1 %Cmp54, label %CF80, label %CF81
CF81: ; preds = %CF80
- %L55 = load i8* %Sl
+ %L55 = load i8, i8* %Sl
store i8 %Sl16, i8* %Sl
%E56 = extractelement <1 x i16> %B, i32 0
%Shuff57 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> <i32 1>
@@ -105,7 +105,7 @@ CF81: ; preds = %CF80
%B59 = srem i32 %E19, %E19
%Sl60 = select i1 %Cmp, i8 77, i8 77
%Cmp61 = icmp ult <1 x i16> zeroinitializer, %B
- %L62 = load i8* %Sl
+ %L62 = load i8, i8* %Sl
store i64 %L47, i64* %PC52
%E63 = extractelement <4 x i32> %I43, i32 2
%Shuff64 = shufflevector <4 x i1> zeroinitializer, <4 x i1> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
@@ -117,7 +117,7 @@ CF81: ; preds = %CF80
br i1 %Cmp69, label %CF77, label %CF79
CF79: ; preds = %CF81
- %L70 = load i32* %A
+ %L70 = load i32, i32* %A
store i64 %4, i64* %PC
%E71 = extractelement <4 x i32> zeroinitializer, i32 0
%Shuff72 = shufflevector <8 x i32> zeroinitializer, <8 x i32> %B44, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 undef, i32 7, i32 9>
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll b/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
index f9cab03..bdf6eaf 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
@@ -14,7 +14,7 @@ BB:
%A2 = alloca i64
%A1 = alloca i32
%A = alloca <2 x i64>
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 -1, i8* %0
%E = extractelement <2 x i32> zeroinitializer, i32 0
%Shuff = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
@@ -22,7 +22,7 @@ BB:
%B = lshr i8 %L, -69
%ZE = fpext float 0xBF2AA5FE80000000 to double
%Sl = select i1 true, <1 x i64> <i64 -1>, <1 x i64> <i64 -1>
- %L5 = load i8* %0
+ %L5 = load i8, i8* %0
store i8 -69, i8* %0
%E6 = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
%Shuff7 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
@@ -31,7 +31,7 @@ BB:
%FC = uitofp i32 %3 to double
%Sl10 = select i1 true, <1 x i1> zeroinitializer, <1 x i1> zeroinitializer
%Cmp = icmp ne <1 x i64> %I, <i64 -1>
- %L11 = load i8* %0
+ %L11 = load i8, i8* %0
store i8 %L11, i8* %0
%E12 = extractelement <1 x i64> <i64 -1>, i32 0
%Shuff13 = shufflevector <1 x i64> %Sl, <1 x i64> <i64 -1>, <1 x i32> <i32 1>
@@ -42,7 +42,7 @@ BB:
br label %CF74
CF74: ; preds = %CF74, %CF80, %CF76, %BB
- %L18 = load i8* %0
+ %L18 = load i8, i8* %0
store i8 -69, i8* %0
%E19 = extractelement <1 x i64> %Sl, i32 0
%Shuff20 = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i32> <i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10>
@@ -50,7 +50,7 @@ CF74: ; preds = %CF74, %CF80, %CF76,
%B22 = urem i32 135673, %3
%FC23 = sitofp i8 %L to float
%Sl24 = select i1 true, i8 %B, i8 %L18
- %L25 = load i8* %0
+ %L25 = load i8, i8* %0
store i8 %L, i8* %0
%E26 = extractelement <2 x i32> %Shuff, i32 1
%Shuff27 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 2, i32 0>
@@ -62,7 +62,7 @@ CF74: ; preds = %CF74, %CF80, %CF76,
br i1 %Cmp31, label %CF74, label %CF80
CF80: ; preds = %CF74
- %L32 = load i8* %0
+ %L32 = load i8, i8* %0
store i8 -1, i8* %0
%E33 = extractelement <2 x i32> zeroinitializer, i32 1
%Shuff34 = shufflevector <1 x i64> %Shuff13, <1 x i64> <i64 -1>, <1 x i32> zeroinitializer
@@ -70,7 +70,7 @@ CF80: ; preds = %CF74
%FC36 = sitofp <1 x i1> %Cmp to <1 x float>
%Sl37 = select i1 true, <8 x i8> %Shuff20, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%Cmp38 = icmp sgt <2 x i32> %I21, %Shuff27
- %L39 = load i8* %0
+ %L39 = load i8, i8* %0
store i8 %Sl24, i8* %0
%E40 = extractelement <8 x i64> zeroinitializer, i32 1
%Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Cmp38, <2 x i32> <i32 0, i32 2>
@@ -81,7 +81,7 @@ CF80: ; preds = %CF74
br i1 %Cmp45, label %CF74, label %CF76
CF76: ; preds = %CF80
- %L46 = load i8* %0
+ %L46 = load i8, i8* %0
store i8 %L39, i8* %0
%E47 = extractelement <2 x i32> %Shuff27, i32 0
%Shuff48 = shufflevector <1 x i1> %Sl10, <1 x i1> %Sl10, <1 x i32> <i32 1>
@@ -92,7 +92,7 @@ CF76: ; preds = %CF80
br i1 %Cmp52, label %CF74, label %CF75
CF75: ; preds = %CF75, %CF76
- %L53 = load i8* %0
+ %L53 = load i8, i8* %0
store i8 %L18, i8* %0
%E54 = extractelement <8 x i8> %Shuff20, i32 5
%Shuff55 = shufflevector <2 x i32> %Shuff, <2 x i32> zeroinitializer, <2 x i32> <i32 0, i32 2>
@@ -103,7 +103,7 @@ CF75: ; preds = %CF75, %CF76
br i1 %Cmp59, label %CF75, label %CF78
CF78: ; preds = %CF75
- %L60 = load i8* %0
+ %L60 = load i8, i8* %0
store i8 -69, i8* %0
%E61 = extractelement <2 x i32> zeroinitializer, i32 0
%Shuff62 = shufflevector <2 x i32> %Shuff7, <2 x i32> %I21, <2 x i32> <i32 1, i32 3>
@@ -115,7 +115,7 @@ CF78: ; preds = %CF75
br label %CF
CF: ; preds = %CF, %CF78
- %L68 = load i8* %0
+ %L68 = load i8, i8* %0
store i64 %B57, i64* %2
%E69 = extractelement <2 x i1> %Shuff41, i32 1
br i1 %E69, label %CF, label %CF77
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll b/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
index e14f405..8f23a8c 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
@@ -13,7 +13,7 @@ BB:
%A2 = alloca i8
%A1 = alloca i32
%A = alloca i8
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 %5, i8* %0
%E = extractelement <2 x i16> zeroinitializer, i32 0
%Shuff = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> undef
@@ -25,7 +25,7 @@ BB:
br label %CF83
CF83: ; preds = %BB
- %L5 = load i8* %0
+ %L5 = load i8, i8* %0
store i8 85, i8* %0
%E6 = extractelement <1 x i8> <i8 -1>, i32 0
%Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 1, i32 3>
@@ -37,7 +37,7 @@ CF83: ; preds = %BB
br label %CF
CF: ; preds = %CF, %CF81, %CF83
- %L13 = load i8* %0
+ %L13 = load i8, i8* %0
store i8 0, i8* %0
%E14 = extractelement <2 x i64> zeroinitializer, i32 0
%Shuff15 = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 3, i32 5, i32 7, i32 undef>
@@ -52,7 +52,7 @@ CF80: ; preds = %CF80, %CF
br i1 %Cmp19, label %CF80, label %CF81
CF81: ; preds = %CF80
- %L20 = load i8* %0
+ %L20 = load i8, i8* %0
store i8 85, i8* %0
%E21 = extractelement <1 x i8> <i8 -1>, i32 0
%Shuff22 = shufflevector <1 x i8> <i8 -1>, <1 x i8> %Shuff, <1 x i32> zeroinitializer
@@ -60,7 +60,7 @@ CF81: ; preds = %CF80
%FC24 = fptoui <4 x float> %FC to <4 x i16>
%Sl25 = select i1 %Cmp, <2 x i32> zeroinitializer, <2 x i32> <i32 -1, i32 -1>
%Cmp26 = icmp ult <4 x i64> %I16, %Shuff15
- %L27 = load i8* %0
+ %L27 = load i8, i8* %0
store i8 %L, i8* %0
%E28 = extractelement <1 x i8> <i8 -1>, i32 0
%Shuff29 = shufflevector <8 x i16> zeroinitializer, <8 x i16> zeroinitializer, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5, i32 undef, i32 9>
@@ -68,7 +68,7 @@ CF81: ; preds = %CF80
%B31 = mul i8 %E28, 85
%PC = bitcast i32* %A3 to i32*
%Sl32 = select i1 %Cmp12, float %FC10, float 0x4712BFE680000000
- %L33 = load i32* %PC
+ %L33 = load i32, i32* %PC
store i32 %L33, i32* %PC
%E34 = extractelement <2 x i16> zeroinitializer, i32 1
%Shuff35 = shufflevector <1 x i8> %Shuff, <1 x i8> <i8 -1>, <1 x i32> zeroinitializer
@@ -79,7 +79,7 @@ CF81: ; preds = %CF80
br i1 %Cmp39, label %CF, label %CF77
CF77: ; preds = %CF77, %CF81
- %L40 = load i32* %PC
+ %L40 = load i32, i32* %PC
store i32 %3, i32* %PC
%E41 = extractelement <2 x i32> zeroinitializer, i32 0
%Shuff42 = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
@@ -88,7 +88,7 @@ CF77: ; preds = %CF77, %CF81
%Se = sext i32 %3 to i64
%Sl45 = select i1 true, <1 x i8> %Shuff, <1 x i8> %I43
%Cmp46 = icmp sge <1 x i8> %I36, %Shuff
- %L47 = load i32* %PC
+ %L47 = load i32, i32* %PC
store i32 %L33, i32* %PC
%E48 = extractelement <2 x i16> zeroinitializer, i32 0
%Shuff49 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
@@ -100,7 +100,7 @@ CF77: ; preds = %CF77, %CF81
br i1 %Cmp54, label %CF77, label %CF78
CF78: ; preds = %CF78, %CF77
- %L55 = load i32* %PC
+ %L55 = load i32, i32* %PC
store i32 %L33, i32* %PC
%E56 = extractelement <8 x i16> %Shuff29, i32 4
%Shuff57 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
@@ -111,7 +111,7 @@ CF78: ; preds = %CF78, %CF77
br i1 %Cmp60, label %CF78, label %CF79
CF79: ; preds = %CF79, %CF78
- %L61 = load i32* %PC
+ %L61 = load i32, i32* %PC
store i32 %L33, i32* %A3
%E62 = extractelement <4 x i64> %Shuff15, i32 1
%Shuff63 = shufflevector <8 x i16> %Shuff29, <8 x i16> %Shuff29, <8 x i32> <i32 undef, i32 10, i32 12, i32 undef, i32 undef, i32 undef, i32 4, i32 6>
@@ -123,7 +123,7 @@ CF79: ; preds = %CF79, %CF78
br i1 %Cmp68, label %CF79, label %CF82
CF82: ; preds = %CF79
- %L69 = load i32* %PC
+ %L69 = load i32, i32* %PC
store i32 %L33, i32* %PC
%E70 = extractelement <8 x i16> zeroinitializer, i32 3
%Shuff71 = shufflevector <4 x i64> %Shuff15, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 6, i32 undef, i32 2, i32 4>
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll b/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
index 1a03e55..e3cf796 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
@@ -14,7 +14,7 @@ BB:
%A2 = alloca i64
%A1 = alloca i64
%A = alloca double
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 -101, i8* %0
%E = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
%Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1>
@@ -22,7 +22,7 @@ BB:
%B = and i64 116376, 57247
%FC = uitofp i8 7 to double
%Sl = select i1 false, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
- %L5 = load i8* %0
+ %L5 = load i8, i8* %0
store i8 %L, i8* %0
%E6 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 3
%Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
@@ -33,7 +33,7 @@ BB:
br label %CF
CF: ; preds = %CF, %BB
- %L11 = load i8* %0
+ %L11 = load i8, i8* %0
store i8 -87, i8* %0
%E12 = extractelement <4 x i64> zeroinitializer, i32 0
%Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1, i32 3, i32 5>
@@ -45,7 +45,7 @@ CF: ; preds = %CF, %BB
br i1 %Cmp18, label %CF, label %CF80
CF80: ; preds = %CF80, %CF88, %CF
- %L19 = load i8* %0
+ %L19 = load i8, i8* %0
store i8 -101, i8* %0
%E20 = extractelement <4 x i64> zeroinitializer, i32 0
%Shuff21 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff7, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
@@ -56,7 +56,7 @@ CF80: ; preds = %CF80, %CF88, %CF
br i1 %Cmp25, label %CF80, label %CF83
CF83: ; preds = %CF83, %CF80
- %L26 = load i8* %0
+ %L26 = load i8, i8* %0
store i8 -87, i8* %0
%E27 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
%Shuff28 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
@@ -68,7 +68,7 @@ CF83: ; preds = %CF83, %CF80
br i1 %Cmp33, label %CF83, label %CF88
CF88: ; preds = %CF83
- %L34 = load i8* %0
+ %L34 = load i8, i8* %0
store i8 -87, i8* %0
%E35 = extractelement <8 x i64> %Shuff, i32 7
%Shuff36 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %Shuff28, <4 x i32> <i32 2, i32 undef, i32 undef, i32 0>
@@ -80,7 +80,7 @@ CF88: ; preds = %CF83
br i1 %Cmp40, label %CF80, label %CF81
CF81: ; preds = %CF81, %CF85, %CF87, %CF88
- %L41 = load i8* %0
+ %L41 = load i8, i8* %0
store i8 %L34, i8* %0
%E42 = extractelement <8 x i64> %Shuff13, i32 6
%Shuff43 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 7>
@@ -92,7 +92,7 @@ CF81: ; preds = %CF81, %CF85, %CF87,
br i1 %Cmp47, label %CF81, label %CF85
CF85: ; preds = %CF81
- %L48 = load i8* %0
+ %L48 = load i8, i8* %0
store i8 -101, i8* %0
%E49 = extractelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i32 2
%Shuff50 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
@@ -101,7 +101,7 @@ CF85: ; preds = %CF81
%FC53 = uitofp i8 %L48 to double
%Sl54 = select i1 %Cmp47, i32 %3, i32 %Sl24
%Cmp55 = icmp ne <8 x i64> %Shuff13, zeroinitializer
- %L56 = load i8* %0
+ %L56 = load i8, i8* %0
store i8 %L11, i8* %0
%E57 = extractelement <4 x i64> %Shuff21, i32 1
%Shuff58 = shufflevector <8 x i64> %Shuff, <8 x i64> zeroinitializer, <8 x i32> <i32 4, i32 6, i32 undef, i32 10, i32 12, i32 undef, i32 0, i32 2>
@@ -113,7 +113,7 @@ CF85: ; preds = %CF81
CF84: ; preds = %CF84, %CF85
%Sl62 = select i1 false, i8 %L, i8 %L48
%Cmp63 = icmp ne <8 x i64> %I, zeroinitializer
- %L64 = load i8* %0
+ %L64 = load i8, i8* %0
store i8 %5, i8* %0
%E65 = extractelement <8 x i1> %Cmp55, i32 0
br i1 %E65, label %CF84, label %CF87
@@ -125,7 +125,7 @@ CF87: ; preds = %CF84
%ZE69 = zext <8 x i8> %Sl32 to <8 x i64>
%Sl70 = select i1 %Tr61, i64 %E20, i64 %E12
%Cmp71 = icmp slt <8 x i64> %I, %Shuff
- %L72 = load i8* %0
+ %L72 = load i8, i8* %0
store i8 %L72, i8* %0
%E73 = extractelement <8 x i1> %Cmp55, i32 6
br i1 %E73, label %CF81, label %CF82
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll b/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
index 96547d9..6f33810 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
@@ -14,7 +14,7 @@ BB:
%A2 = alloca double
%A1 = alloca float
%A = alloca double
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 -123, i8* %0
%E = extractelement <4 x i64> zeroinitializer, i32 1
%Shuff = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -22,7 +22,7 @@ BB:
%BC = bitcast i64 181325 to double
%Sl = select i1 false, <2 x i32> zeroinitializer, <2 x i32> zeroinitializer
%Cmp = icmp ne <4 x i64> zeroinitializer, zeroinitializer
- %L5 = load i8* %0
+ %L5 = load i8, i8* %0
store i8 %L, i8* %0
%E6 = extractelement <4 x i64> zeroinitializer, i32 3
%Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 2, i32 0>
@@ -33,7 +33,7 @@ BB:
br label %CF80
CF80: ; preds = %BB
- %L11 = load i8* %0
+ %L11 = load i8, i8* %0
store i8 -123, i8* %0
%E12 = extractelement <2 x i16> zeroinitializer, i32 1
%Shuff13 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -42,7 +42,7 @@ CF80: ; preds = %BB
%PC = bitcast i1* %A4 to i64*
%Sl16 = select i1 %Cmp10, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
%Cmp17 = icmp ule <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %Sl16
- %L18 = load double* %A2
+ %L18 = load double, double* %A2
store i64 498254, i64* %PC
%E19 = extractelement <4 x i64> zeroinitializer, i32 0
%Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
@@ -51,7 +51,7 @@ CF80: ; preds = %BB
%ZE = zext <2 x i1> %Shuff20 to <2 x i32>
%Sl23 = select i1 %Cmp10, <2 x i1> %Shuff20, <2 x i1> zeroinitializer
%Cmp24 = icmp ult <2 x i32> zeroinitializer, zeroinitializer
- %L25 = load i8* %0
+ %L25 = load i8, i8* %0
store i8 %L25, i8* %0
%E26 = extractelement <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, i32 3
%Shuff27 = shufflevector <4 x i32> %Shuff, <4 x i32> %I14, <4 x i32> <i32 6, i32 0, i32 undef, i32 4>
@@ -63,7 +63,7 @@ CF80: ; preds = %BB
CF79: ; preds = %CF80
%Sl30 = select i1 false, i8 %B29, i8 -123
%Cmp31 = icmp sge <2 x i1> %I, %I
- %L32 = load i64* %PC
+ %L32 = load i64, i64* %PC
store i8 -123, i8* %0
%E33 = extractelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 2
%Shuff34 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
@@ -75,7 +75,7 @@ CF79: ; preds = %CF80
br label %CF
CF: ; preds = %CF, %CF79
- %L40 = load double* %A
+ %L40 = load double, double* %A
store i1 %Cmp39, i1* %PC37
%E41 = extractelement <4 x i64> zeroinitializer, i32 3
%Shuff42 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %ZE, <2 x i32> <i32 2, i32 undef>
@@ -90,7 +90,7 @@ CF77: ; preds = %CF77, %CF
br i1 %Cmp46, label %CF77, label %CF78
CF78: ; preds = %CF78, %CF83, %CF82, %CF77
- %L47 = load i64* %PC
+ %L47 = load i64, i64* %PC
store i8 -123, i8* %0
%E48 = extractelement <4 x i64> zeroinitializer, i32 3
%Shuff49 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
@@ -105,7 +105,7 @@ CF83: ; preds = %CF78
br i1 %Cmp54, label %CF78, label %CF82
CF82: ; preds = %CF83
- %L55 = load i64* %PC
+ %L55 = load i64, i64* %PC
store i64 %L32, i64* %PC
%E56 = extractelement <2 x i16> %Shuff7, i32 1
%Shuff57 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
@@ -114,7 +114,7 @@ CF82: ; preds = %CF83
%FC = sitofp i64 498254 to double
%Sl60 = select i1 false, i64 %E6, i64 -1
%Cmp61 = icmp sgt <4 x i32> %Shuff27, %I43
- %L62 = load i64* %PC
+ %L62 = load i64, i64* %PC
store i64 %Sl9, i64* %PC
%E63 = extractelement <2 x i32> %ZE, i32 0
%Shuff64 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 1, i32 3, i32 undef, i32 7>
@@ -126,7 +126,7 @@ CF82: ; preds = %CF83
CF81: ; preds = %CF82
%Cmp69 = icmp ne <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
- %L70 = load i8* %0
+ %L70 = load i8, i8* %0
store i64 %L55, i64* %PC
%E71 = extractelement <4 x i32> %Shuff49, i32 1
%Shuff72 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff34, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll b/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
index bef75f3..181f72a 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
@@ -14,7 +14,7 @@ BB:
%A2 = alloca float
%A1 = alloca double
%A = alloca double
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 97, i8* %0
%E = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
%Shuff = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3>
@@ -22,7 +22,7 @@ BB:
%Tr = trunc <1 x i64> zeroinitializer to <1 x i8>
%Sl = select i1 false, double* %A1, double* %A
%Cmp = icmp ne <2 x i64> zeroinitializer, zeroinitializer
- %L5 = load double* %Sl
+ %L5 = load double, double* %Sl
store float -4.374162e+06, float* %A2
%E6 = extractelement <4 x i64> zeroinitializer, i32 3
%Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
@@ -34,7 +34,7 @@ BB:
br label %CF72
CF72: ; preds = %CF72, %CF80, %CF78, %BB
- %L11 = load double* %Sl
+ %L11 = load double, double* %Sl
store double 0.000000e+00, double* %Sl
%E12 = extractelement <2 x i1> zeroinitializer, i32 0
br i1 %E12, label %CF72, label %CF80
@@ -49,7 +49,7 @@ CF80: ; preds = %CF72
br i1 %Cmp17, label %CF72, label %CF77
CF77: ; preds = %CF77, %CF80
- %L18 = load double* %Sl
+ %L18 = load double, double* %Sl
store double 0.000000e+00, double* %Sl
%E19 = extractelement <2 x i1> zeroinitializer, i32 0
br i1 %E19, label %CF77, label %CF78
@@ -60,7 +60,7 @@ CF78: ; preds = %CF77
%B22 = sdiv <4 x i64> %Shuff7, zeroinitializer
%FC = uitofp i8 97 to double
%Sl23 = select i1 %Cmp10, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
- %L24 = load double* %Sl
+ %L24 = load double, double* %Sl
store float %Sl16, float* %PC
%E25 = extractelement <2 x i1> %Shuff, i32 1
br i1 %E25, label %CF72, label %CF76
@@ -71,7 +71,7 @@ CF76: ; preds = %CF78
%B28 = mul <4 x i64> %I27, zeroinitializer
%ZE = zext <8 x i1> zeroinitializer to <8 x i64>
%Sl29 = select i1 %Cmp17, float -4.374162e+06, float -4.374162e+06
- %L30 = load i8* %0
+ %L30 = load i8, i8* %0
store double %L5, double* %Sl
%E31 = extractelement <8 x i1> zeroinitializer, i32 5
br label %CF
@@ -85,7 +85,7 @@ CF: ; preds = %CF, %CF81, %CF76
br i1 %Cmp36, label %CF, label %CF74
CF74: ; preds = %CF74, %CF
- %L37 = load float* %PC
+ %L37 = load float, float* %PC
store double 0.000000e+00, double* %Sl
%E38 = extractelement <2 x i1> %Sl23, i32 1
br i1 %E38, label %CF74, label %CF75
@@ -95,7 +95,7 @@ CF75: ; preds = %CF75, %CF82, %CF74
%I40 = insertelement <4 x i64> zeroinitializer, i64 %4, i32 2
%Sl41 = select i1 %Cmp10, i32 0, i32 %3
%Cmp42 = icmp ne <1 x i64> zeroinitializer, zeroinitializer
- %L43 = load double* %Sl
+ %L43 = load double, double* %Sl
store i64 %4, i64* %2
%E44 = extractelement <2 x i1> %Shuff20, i32 1
br i1 %E44, label %CF75, label %CF82
@@ -109,7 +109,7 @@ CF82: ; preds = %CF75
br i1 %Cmp49, label %CF75, label %CF81
CF81: ; preds = %CF82
- %L50 = load i8* %0
+ %L50 = load i8, i8* %0
store double %L43, double* %Sl
%E51 = extractelement <4 x i64> %Shuff7, i32 3
%Shuff52 = shufflevector <4 x float> %BC34, <4 x float> %BC34, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
@@ -117,7 +117,7 @@ CF81: ; preds = %CF82
%B54 = fdiv double %L24, %L43
%BC55 = bitcast <4 x i64> zeroinitializer to <4 x double>
%Sl56 = select i1 false, i8 %5, i8 97
- %L57 = load i8* %0
+ %L57 = load i8, i8* %0
store i8 %L50, i8* %0
%E58 = extractelement <2 x i1> %Shuff20, i32 1
br i1 %E58, label %CF, label %CF73
@@ -129,7 +129,7 @@ CF73: ; preds = %CF73, %CF81
%PC62 = bitcast double* %A3 to float*
%Sl63 = select i1 %Cmp10, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer
%Cmp64 = icmp ne <2 x i1> %Cmp, %Shuff
- %L65 = load double* %A1
+ %L65 = load double, double* %A1
store float -4.374162e+06, float* %PC62
%E66 = extractelement <8 x i1> %I21, i32 3
br i1 %E66, label %CF73, label %CF79
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll b/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
index 697871d..c0bc905 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
@@ -14,7 +14,7 @@ BB:
%A2 = alloca <1 x double>
%A1 = alloca <8 x double>
%A = alloca i64
- %L = load i8* %0
+ %L = load i8, i8* %0
store i64 33695, i64* %A
%E = extractelement <4 x i32> zeroinitializer, i32 3
%Shuff = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 2, i32 0>
@@ -22,7 +22,7 @@ BB:
%B = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%ZE = fpext float 0x3B64A2B880000000 to double
%Sl = select i1 true, i16 -1, i16 -11642
- %L5 = load i8* %0
+ %L5 = load i8, i8* %0
store i8 0, i8* %0
%E6 = extractelement <4 x i32> zeroinitializer, i32 2
%Shuff7 = shufflevector <8 x i1> zeroinitializer, <8 x i1> zeroinitializer, <8 x i32> <i32 undef, i32 7, i32 9, i32 11, i32 13, i32 15, i32 1, i32 undef>
@@ -31,7 +31,7 @@ BB:
%BC = bitcast <2 x i32> <i32 -1, i32 -1> to <2 x float>
%Sl10 = select i1 true, i32* %1, i32* %1
%Cmp = icmp sge <8 x i64> zeroinitializer, zeroinitializer
- %L11 = load i32* %Sl10
+ %L11 = load i32, i32* %Sl10
store <1 x double> zeroinitializer, <1 x double>* %A2
%E12 = extractelement <4 x i16> zeroinitializer, i32 0
%Shuff13 = shufflevector <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i32> undef
@@ -43,7 +43,7 @@ BB:
br label %CF75
CF75: ; preds = %CF75, %BB
- %L19 = load i32* %Sl10
+ %L19 = load i32, i32* %Sl10
store i32 %L11, i32* %Sl10
%E20 = extractelement <4 x i32> zeroinitializer, i32 1
%Shuff21 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %I8, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
@@ -55,7 +55,7 @@ CF75: ; preds = %CF75, %BB
br i1 %Cmp26, label %CF75, label %CF76
CF76: ; preds = %CF75
- %L27 = load i32* %Sl10
+ %L27 = load i32, i32* %Sl10
store i32 439732, i32* %Sl10
%E28 = extractelement <4 x i32> %Shuff21, i32 3
%Shuff29 = shufflevector <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0>
@@ -65,7 +65,7 @@ CF76: ; preds = %CF75
br label %CF74
CF74: ; preds = %CF74, %CF80, %CF78, %CF76
- %L33 = load i64* %2
+ %L33 = load i64, i64* %2
store i32 71140, i32* %Sl10
%E34 = extractelement <4 x i32> zeroinitializer, i32 1
%Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> undef
@@ -76,7 +76,7 @@ CF74: ; preds = %CF74, %CF80, %CF78,
br i1 %Cmp39, label %CF74, label %CF80
CF80: ; preds = %CF74
- %L40 = load i8* %0
+ %L40 = load i8, i8* %0
store i32 0, i32* %Sl10
%E41 = extractelement <8 x i64> zeroinitializer, i32 1
%Shuff42 = shufflevector <1 x i16> %I14, <1 x i16> %I14, <1 x i32> undef
@@ -86,7 +86,7 @@ CF80: ; preds = %CF74
br i1 %Sl44, label %CF74, label %CF78
CF78: ; preds = %CF80
- %L45 = load i32* %Sl10
+ %L45 = load i32, i32* %Sl10
store i8 %L5, i8* %0
%E46 = extractelement <8 x i1> %Shuff7, i32 2
br i1 %E46, label %CF74, label %CF77
@@ -101,7 +101,7 @@ CF77: ; preds = %CF77, %CF78
br i1 %Cmp52, label %CF77, label %CF79
CF79: ; preds = %CF77
- %L53 = load i32* %Sl10
+ %L53 = load i32, i32* %Sl10
store i8 %L40, i8* %0
%E54 = extractelement <4 x i32> zeroinitializer, i32 1
%Shuff55 = shufflevector <4 x i32> %Shuff21, <4 x i32> %I8, <4 x i32> <i32 4, i32 6, i32 undef, i32 2>
@@ -109,7 +109,7 @@ CF79: ; preds = %CF77
%Tr = trunc <1 x i64> %Shuff13 to <1 x i16>
%Sl57 = select i1 %Cmp18, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>
%Cmp58 = icmp uge <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %I56
- %L59 = load i8* %0
+ %L59 = load i8, i8* %0
store <1 x double> zeroinitializer, <1 x double>* %A2
%E60 = extractelement <4 x i32> zeroinitializer, i32 0
%Shuff61 = shufflevector <4 x i32> %I8, <4 x i32> %I8, <4 x i32> <i32 undef, i32 1, i32 undef, i32 undef>
@@ -121,7 +121,7 @@ CF79: ; preds = %CF77
br label %CF
CF: ; preds = %CF79
- %L66 = load i32* %Sl10
+ %L66 = load i32, i32* %Sl10
store i32 %E6, i32* %PC
%E67 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2
%Shuff68 = shufflevector <4 x i32> %Sl64, <4 x i32> %I8, <4 x i32> <i32 5, i32 undef, i32 1, i32 undef>
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll b/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
index dc4200a..a3150e9 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
@@ -14,14 +14,14 @@ BB:
%A2 = alloca <4 x i1>
%A1 = alloca <4 x i16>
%A = alloca <2 x i32>
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 %L, i8* %0
%E = extractelement <4 x i32> zeroinitializer, i32 0
%Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 1, i32 3, i32 5>
%I = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
%FC = sitofp <4 x i32> zeroinitializer to <4 x double>
%Sl = select i1 false, <4 x i64> %Shuff, <4 x i64> %Shuff
- %L5 = load i8* %0
+ %L5 = load i8, i8* %0
store i8 %5, i8* %0
%E6 = extractelement <1 x i16> zeroinitializer, i32 0
%Shuff7 = shufflevector <2 x i1> %I, <2 x i1> %I, <2 x i32> <i32 1, i32 undef>
@@ -30,7 +30,7 @@ BB:
%FC9 = fptoui float 0x406DB70180000000 to i64
%Sl10 = select i1 false, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%Cmp = icmp ult <4 x i64> zeroinitializer, zeroinitializer
- %L11 = load i8* %0
+ %L11 = load i8, i8* %0
store i8 %L, i8* %0
%E12 = extractelement <4 x i64> zeroinitializer, i32 2
%Shuff13 = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 undef, i32 3>
@@ -42,7 +42,7 @@ BB:
br label %CF
CF: ; preds = %CF, %CF79, %CF84, %BB
- %L18 = load i8* %0
+ %L18 = load i8, i8* %0
store i8 %L, i8* %0
%E19 = extractelement <4 x i64> %Sl, i32 3
%Shuff20 = shufflevector <2 x i1> %Shuff7, <2 x i1> %I, <2 x i32> <i32 2, i32 0>
@@ -54,7 +54,7 @@ CF: ; preds = %CF, %CF79, %CF84, %
br i1 %Cmp25, label %CF, label %CF79
CF79: ; preds = %CF
- %L26 = load i8* %0
+ %L26 = load i8, i8* %0
store i8 %L26, i8* %0
%E27 = extractelement <1 x i16> zeroinitializer, i32 0
%Shuff28 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11>
@@ -65,7 +65,7 @@ CF79: ; preds = %CF
br i1 %Cmp32, label %CF, label %CF78
CF78: ; preds = %CF78, %CF79
- %L33 = load i8* %0
+ %L33 = load i8, i8* %0
store i8 %L, i8* %0
%E34 = extractelement <16 x i32> %Shuff28, i32 1
%Shuff35 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I21, <4 x i32> <i32 undef, i32 6, i32 0, i32 2>
@@ -76,7 +76,7 @@ CF78: ; preds = %CF78, %CF79
br i1 %Cmp38, label %CF78, label %CF80
CF80: ; preds = %CF80, %CF82, %CF78
- %L39 = load i8* %0
+ %L39 = load i8, i8* %0
store i8 %L, i8* %0
%E40 = extractelement <2 x i1> %Shuff20, i32 1
br i1 %E40, label %CF80, label %CF82
@@ -87,7 +87,7 @@ CF82: ; preds = %CF80
%B43 = sub i32 %E, 0
%Sl44 = select i1 %Cmp32, <16 x i32> %Shuff28, <16 x i32> %Shuff28
%Cmp45 = icmp sgt <4 x i64> zeroinitializer, %I21
- %L46 = load i8* %0
+ %L46 = load i8, i8* %0
store i8 %L11, i8* %0
%E47 = extractelement <8 x i32> %Sl16, i32 4
%Shuff48 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff7, <2 x i32> <i32 undef, i32 1>
@@ -99,7 +99,7 @@ CF82: ; preds = %CF80
CF81: ; preds = %CF81, %CF82
%Sl52 = select i1 false, float -6.749110e+06, float 0x406DB70180000000
%Cmp53 = icmp uge <2 x i32> <i32 -1, i32 -1>, <i32 -1, i32 -1>
- %L54 = load i8* %0
+ %L54 = load i8, i8* %0
store i8 %L5, i8* %0
%E55 = extractelement <8 x i32> zeroinitializer, i32 7
%Shuff56 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 4, i32 6, i32 0>
@@ -108,7 +108,7 @@ CF81: ; preds = %CF81, %CF82
%FC59 = fptoui <4 x double> %I36 to <4 x i16>
%Sl60 = select i1 %Cmp17, <2 x i1> %I, <2 x i1> %I57
%Cmp61 = icmp ule <8 x i32> %B50, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
- %L62 = load i8* %0
+ %L62 = load i8, i8* %0
store i8 %L33, i8* %0
%E63 = extractelement <4 x i64> %Shuff, i32 2
%Shuff64 = shufflevector <4 x i64> %Shuff56, <4 x i64> %Shuff56, <4 x i32> <i32 5, i32 7, i32 1, i32 undef>
@@ -126,7 +126,7 @@ CF84: ; preds = %CF83
br i1 %Cmp69, label %CF, label %CF77
CF77: ; preds = %CF84
- %L70 = load i8* %0
+ %L70 = load i8, i8* %0
store i8 %L, i8* %0
%E71 = extractelement <4 x i64> %Shuff, i32 0
%Shuff72 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
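; The shuffle.ll hunks below update tests in which shufflevector masks are
; pattern-matched to dedicated MSA permute instructions: even/odd element
; interleaves select ilvev/ilvod, half-vector interleaves select ilvl/ilvr,
; element-packing masks select pckev/pckod, uniform masks select splati, and
; small per-element permutes fold into shf's 8-bit immediate. For instance,
; a sketch mirroring the ilvev_v8i16_0 test below (not a hunk from the patch):
;   %3 = shufflevector <8 x i16> %1, <8 x i16> %2,
;        <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
;   ; expected lowering: ilvev.h $wD, $wA, $wB ($wD/$wA/$wB are placeholders)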
diff --git a/test/CodeGen/Mips/msa/shuffle.ll b/test/CodeGen/Mips/msa/shuffle.ll
index faeec5d..7feed92 100644
--- a/test/CodeGen/Mips/msa/shuffle.ll
+++ b/test/CodeGen/Mips/msa/shuffle.ll
@@ -4,7 +4,7 @@
define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -20,7 +20,7 @@ define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_1:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
@@ -34,8 +34,8 @@ define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_2:
- %1 = load <16 x i8>* %a
- %2 = load <16 x i8>* %b
+ %1 = load <16 x i8>, <16 x i8>* %a
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -51,9 +51,9 @@ define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_3:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -71,7 +71,7 @@ define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_4:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17>
; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
@@ -85,7 +85,7 @@ define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -101,7 +101,7 @@ define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_1:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
@@ -115,8 +115,8 @@ define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_2:
- %1 = load <8 x i16>* %a
- %2 = load <8 x i16>* %b
+ %1 = load <8 x i16>, <8 x i16>* %a
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -132,9 +132,9 @@ define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_3:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -152,7 +152,7 @@ define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_4:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> <i32 1, i32 9, i32 1, i32 9, i32 1, i32 9, i32 1, i32 9>
; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
@@ -169,7 +169,7 @@ define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
@@ -183,7 +183,7 @@ define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_1:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85
@@ -197,8 +197,8 @@ define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_2:
- %1 = load <4 x i32>* %a
- %2 = load <4 x i32>* %b
+ %1 = load <4 x i32>, <4 x i32>* %a
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 5, i32 6, i32 4>
; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R2]], 36
@@ -212,9 +212,9 @@ define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_3:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -232,7 +232,7 @@ define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_4:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 5, i32 5, i32 1>
; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85
@@ -246,7 +246,7 @@ define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_0:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -262,7 +262,7 @@ define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_1:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
@@ -276,8 +276,8 @@ define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_2:
- %1 = load <2 x i64>* %a
- %2 = load <2 x i64>* %b
+ %1 = load <2 x i64>, <2 x i64>* %a
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -293,9 +293,9 @@ define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_3:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -313,7 +313,7 @@ define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_4:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> <i32 1, i32 3>
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
@@ -327,7 +327,7 @@ define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: shf_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 2, i32 0, i32 5, i32 7, i32 6, i32 4, i32 9, i32 11, i32 10, i32 8, i32 13, i32 15, i32 14, i32 12>
; CHECK-DAG: shf.b [[R3:\$w[0-9]+]], [[R1]], 45
@@ -341,7 +341,7 @@ define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: shf_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
; CHECK-DAG: shf.h [[R3:\$w[0-9]+]], [[R1]], 27
@@ -355,7 +355,7 @@ define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: shf_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
@@ -371,9 +371,9 @@ define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: ilvev_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
@@ -388,9 +388,9 @@ define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: ilvev_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -404,9 +404,9 @@ define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: ilvev_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -420,9 +420,9 @@ define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: ilvev_v2i64_0:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -436,9 +436,9 @@ define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: ilvod_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
@@ -453,9 +453,9 @@ define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: ilvod_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -469,9 +469,9 @@ define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: ilvod_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -485,9 +485,9 @@ define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: ilvod_v2i64_0:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -501,9 +501,9 @@ define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: ilvl_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -518,9 +518,9 @@ define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: ilvl_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -534,9 +534,9 @@ define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: ilvl_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -550,9 +550,9 @@ define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: ilvl_v2i64_0:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
; ilvl.d and ilvev.d are equivalent for v2i64
@@ -567,9 +567,9 @@ define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: ilvr_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -584,9 +584,9 @@ define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: ilvr_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -600,9 +600,9 @@ define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: ilvr_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -616,9 +616,9 @@ define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: ilvr_v2i64_0:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
; ilvr.d and ilvod.d are equivalent for v2i64
@@ -633,9 +633,9 @@ define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: pckev_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -650,9 +650,9 @@ define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: pckev_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -666,9 +666,9 @@ define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: pckev_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -682,9 +682,9 @@ define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: pckev_v2i64_0:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
; pckev.d and ilvev.d are equivalent for v2i64
@@ -699,9 +699,9 @@ define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: pckod_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -716,9 +716,9 @@ define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: pckod_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -732,9 +732,9 @@ define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: pckod_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -748,9 +748,9 @@ define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: pckod_v2i64_0:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
; pckod.d and ilvod.d are equivalent for v2i64
@@ -765,7 +765,7 @@ define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind {
; CHECK: splati_v16i8_0:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef,
<16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -780,7 +780,7 @@ define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind {
define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind {
; CHECK: splati_v8i16_0:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][4]
@@ -794,7 +794,7 @@ define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind {
define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind {
; CHECK: splati_v4i32_0:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
; shf.w and splati.w are equivalent
@@ -809,7 +809,7 @@ define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind {
define void @splati_v2i64_0(<2 x i64>* %c, <2 x i64>* %a) nounwind {
; CHECK: splati_v2i64_0:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
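The change in shuffle.ll above is purely syntactic: `load` now spells out the result type as an explicit first operand instead of inferring it from the pointer operand. A minimal before/after sketch, assuming a hypothetical `%p` of type `<16 x i8>*`:

  %v = load <16 x i8>* %p             ; old form: result type inferred from the pointer
  %v = load <16 x i8>, <16 x i8>* %p  ; new form: result type stated explicitly

The CHECK lines are untouched because the generated MSA code is identical either way.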
diff --git a/test/CodeGen/Mips/msa/spill.ll b/test/CodeGen/Mips/msa/spill.ll
index 66f896a..8c9a799 100644
--- a/test/CodeGen/Mips/msa/spill.ll
+++ b/test/CodeGen/Mips/msa/spill.ll
@@ -6,73 +6,73 @@
define i32 @test_i8(<16 x i8>* %p0, <16 x i8>* %q1) nounwind {
entry:
- %p1 = getelementptr <16 x i8>* %p0, i32 1
- %p2 = getelementptr <16 x i8>* %p0, i32 2
- %p3 = getelementptr <16 x i8>* %p0, i32 3
- %p4 = getelementptr <16 x i8>* %p0, i32 4
- %p5 = getelementptr <16 x i8>* %p0, i32 5
- %p6 = getelementptr <16 x i8>* %p0, i32 6
- %p7 = getelementptr <16 x i8>* %p0, i32 7
- %p8 = getelementptr <16 x i8>* %p0, i32 8
- %p9 = getelementptr <16 x i8>* %p0, i32 9
- %p10 = getelementptr <16 x i8>* %p0, i32 10
- %p11 = getelementptr <16 x i8>* %p0, i32 11
- %p12 = getelementptr <16 x i8>* %p0, i32 12
- %p13 = getelementptr <16 x i8>* %p0, i32 13
- %p14 = getelementptr <16 x i8>* %p0, i32 14
- %p15 = getelementptr <16 x i8>* %p0, i32 15
- %p16 = getelementptr <16 x i8>* %p0, i32 16
- %p17 = getelementptr <16 x i8>* %p0, i32 17
- %p18 = getelementptr <16 x i8>* %p0, i32 18
- %p19 = getelementptr <16 x i8>* %p0, i32 19
- %p20 = getelementptr <16 x i8>* %p0, i32 20
- %p21 = getelementptr <16 x i8>* %p0, i32 21
- %p22 = getelementptr <16 x i8>* %p0, i32 22
- %p23 = getelementptr <16 x i8>* %p0, i32 23
- %p24 = getelementptr <16 x i8>* %p0, i32 24
- %p25 = getelementptr <16 x i8>* %p0, i32 25
- %p26 = getelementptr <16 x i8>* %p0, i32 26
- %p27 = getelementptr <16 x i8>* %p0, i32 27
- %p28 = getelementptr <16 x i8>* %p0, i32 28
- %p29 = getelementptr <16 x i8>* %p0, i32 29
- %p30 = getelementptr <16 x i8>* %p0, i32 30
- %p31 = getelementptr <16 x i8>* %p0, i32 31
- %p32 = getelementptr <16 x i8>* %p0, i32 32
- %p33 = getelementptr <16 x i8>* %p0, i32 33
- %0 = load <16 x i8>* %p0, align 16
- %1 = load <16 x i8>* %p1, align 16
- %2 = load <16 x i8>* %p2, align 16
- %3 = load <16 x i8>* %p3, align 16
- %4 = load <16 x i8>* %p4, align 16
- %5 = load <16 x i8>* %p5, align 16
- %6 = load <16 x i8>* %p6, align 16
- %7 = load <16 x i8>* %p7, align 16
- %8 = load <16 x i8>* %p8, align 16
- %9 = load <16 x i8>* %p9, align 16
- %10 = load <16 x i8>* %p10, align 16
- %11 = load <16 x i8>* %p11, align 16
- %12 = load <16 x i8>* %p12, align 16
- %13 = load <16 x i8>* %p13, align 16
- %14 = load <16 x i8>* %p14, align 16
- %15 = load <16 x i8>* %p15, align 16
- %16 = load <16 x i8>* %p16, align 16
- %17 = load <16 x i8>* %p17, align 16
- %18 = load <16 x i8>* %p18, align 16
- %19 = load <16 x i8>* %p19, align 16
- %20 = load <16 x i8>* %p20, align 16
- %21 = load <16 x i8>* %p21, align 16
- %22 = load <16 x i8>* %p22, align 16
- %23 = load <16 x i8>* %p23, align 16
- %24 = load <16 x i8>* %p24, align 16
- %25 = load <16 x i8>* %p25, align 16
- %26 = load <16 x i8>* %p26, align 16
- %27 = load <16 x i8>* %p27, align 16
- %28 = load <16 x i8>* %p28, align 16
- %29 = load <16 x i8>* %p29, align 16
- %30 = load <16 x i8>* %p30, align 16
- %31 = load <16 x i8>* %p31, align 16
- %32 = load <16 x i8>* %p32, align 16
- %33 = load <16 x i8>* %p33, align 16
+ %p1 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 1
+ %p2 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 2
+ %p3 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 3
+ %p4 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 4
+ %p5 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 5
+ %p6 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 6
+ %p7 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 7
+ %p8 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 8
+ %p9 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 9
+ %p10 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 10
+ %p11 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 11
+ %p12 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 12
+ %p13 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 13
+ %p14 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 14
+ %p15 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 15
+ %p16 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 16
+ %p17 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 17
+ %p18 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 18
+ %p19 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 19
+ %p20 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 20
+ %p21 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 21
+ %p22 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 22
+ %p23 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 23
+ %p24 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 24
+ %p25 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 25
+ %p26 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 26
+ %p27 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 27
+ %p28 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 28
+ %p29 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 29
+ %p30 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 30
+ %p31 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 31
+ %p32 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 32
+ %p33 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 33
+ %0 = load <16 x i8>, <16 x i8>* %p0, align 16
+ %1 = load <16 x i8>, <16 x i8>* %p1, align 16
+ %2 = load <16 x i8>, <16 x i8>* %p2, align 16
+ %3 = load <16 x i8>, <16 x i8>* %p3, align 16
+ %4 = load <16 x i8>, <16 x i8>* %p4, align 16
+ %5 = load <16 x i8>, <16 x i8>* %p5, align 16
+ %6 = load <16 x i8>, <16 x i8>* %p6, align 16
+ %7 = load <16 x i8>, <16 x i8>* %p7, align 16
+ %8 = load <16 x i8>, <16 x i8>* %p8, align 16
+ %9 = load <16 x i8>, <16 x i8>* %p9, align 16
+ %10 = load <16 x i8>, <16 x i8>* %p10, align 16
+ %11 = load <16 x i8>, <16 x i8>* %p11, align 16
+ %12 = load <16 x i8>, <16 x i8>* %p12, align 16
+ %13 = load <16 x i8>, <16 x i8>* %p13, align 16
+ %14 = load <16 x i8>, <16 x i8>* %p14, align 16
+ %15 = load <16 x i8>, <16 x i8>* %p15, align 16
+ %16 = load <16 x i8>, <16 x i8>* %p16, align 16
+ %17 = load <16 x i8>, <16 x i8>* %p17, align 16
+ %18 = load <16 x i8>, <16 x i8>* %p18, align 16
+ %19 = load <16 x i8>, <16 x i8>* %p19, align 16
+ %20 = load <16 x i8>, <16 x i8>* %p20, align 16
+ %21 = load <16 x i8>, <16 x i8>* %p21, align 16
+ %22 = load <16 x i8>, <16 x i8>* %p22, align 16
+ %23 = load <16 x i8>, <16 x i8>* %p23, align 16
+ %24 = load <16 x i8>, <16 x i8>* %p24, align 16
+ %25 = load <16 x i8>, <16 x i8>* %p25, align 16
+ %26 = load <16 x i8>, <16 x i8>* %p26, align 16
+ %27 = load <16 x i8>, <16 x i8>* %p27, align 16
+ %28 = load <16 x i8>, <16 x i8>* %p28, align 16
+ %29 = load <16 x i8>, <16 x i8>* %p29, align 16
+ %30 = load <16 x i8>, <16 x i8>* %p30, align 16
+ %31 = load <16 x i8>, <16 x i8>* %p31, align 16
+ %32 = load <16 x i8>, <16 x i8>* %p32, align 16
+ %33 = load <16 x i8>, <16 x i8>* %p33, align 16
%r1 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
%r2 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r1, <16 x i8> %2)
%r3 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r2, <16 x i8> %3)
@@ -155,73 +155,73 @@ declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
define i32 @test_i16(<8 x i16>* %p0, <8 x i16>* %q1) nounwind {
entry:
- %p1 = getelementptr <8 x i16>* %p0, i32 1
- %p2 = getelementptr <8 x i16>* %p0, i32 2
- %p3 = getelementptr <8 x i16>* %p0, i32 3
- %p4 = getelementptr <8 x i16>* %p0, i32 4
- %p5 = getelementptr <8 x i16>* %p0, i32 5
- %p6 = getelementptr <8 x i16>* %p0, i32 6
- %p7 = getelementptr <8 x i16>* %p0, i32 7
- %p8 = getelementptr <8 x i16>* %p0, i32 8
- %p9 = getelementptr <8 x i16>* %p0, i32 9
- %p10 = getelementptr <8 x i16>* %p0, i32 10
- %p11 = getelementptr <8 x i16>* %p0, i32 11
- %p12 = getelementptr <8 x i16>* %p0, i32 12
- %p13 = getelementptr <8 x i16>* %p0, i32 13
- %p14 = getelementptr <8 x i16>* %p0, i32 14
- %p15 = getelementptr <8 x i16>* %p0, i32 15
- %p16 = getelementptr <8 x i16>* %p0, i32 16
- %p17 = getelementptr <8 x i16>* %p0, i32 17
- %p18 = getelementptr <8 x i16>* %p0, i32 18
- %p19 = getelementptr <8 x i16>* %p0, i32 19
- %p20 = getelementptr <8 x i16>* %p0, i32 20
- %p21 = getelementptr <8 x i16>* %p0, i32 21
- %p22 = getelementptr <8 x i16>* %p0, i32 22
- %p23 = getelementptr <8 x i16>* %p0, i32 23
- %p24 = getelementptr <8 x i16>* %p0, i32 24
- %p25 = getelementptr <8 x i16>* %p0, i32 25
- %p26 = getelementptr <8 x i16>* %p0, i32 26
- %p27 = getelementptr <8 x i16>* %p0, i32 27
- %p28 = getelementptr <8 x i16>* %p0, i32 28
- %p29 = getelementptr <8 x i16>* %p0, i32 29
- %p30 = getelementptr <8 x i16>* %p0, i32 30
- %p31 = getelementptr <8 x i16>* %p0, i32 31
- %p32 = getelementptr <8 x i16>* %p0, i32 32
- %p33 = getelementptr <8 x i16>* %p0, i32 33
- %0 = load <8 x i16>* %p0, align 16
- %1 = load <8 x i16>* %p1, align 16
- %2 = load <8 x i16>* %p2, align 16
- %3 = load <8 x i16>* %p3, align 16
- %4 = load <8 x i16>* %p4, align 16
- %5 = load <8 x i16>* %p5, align 16
- %6 = load <8 x i16>* %p6, align 16
- %7 = load <8 x i16>* %p7, align 16
- %8 = load <8 x i16>* %p8, align 16
- %9 = load <8 x i16>* %p9, align 16
- %10 = load <8 x i16>* %p10, align 16
- %11 = load <8 x i16>* %p11, align 16
- %12 = load <8 x i16>* %p12, align 16
- %13 = load <8 x i16>* %p13, align 16
- %14 = load <8 x i16>* %p14, align 16
- %15 = load <8 x i16>* %p15, align 16
- %16 = load <8 x i16>* %p16, align 16
- %17 = load <8 x i16>* %p17, align 16
- %18 = load <8 x i16>* %p18, align 16
- %19 = load <8 x i16>* %p19, align 16
- %20 = load <8 x i16>* %p20, align 16
- %21 = load <8 x i16>* %p21, align 16
- %22 = load <8 x i16>* %p22, align 16
- %23 = load <8 x i16>* %p23, align 16
- %24 = load <8 x i16>* %p24, align 16
- %25 = load <8 x i16>* %p25, align 16
- %26 = load <8 x i16>* %p26, align 16
- %27 = load <8 x i16>* %p27, align 16
- %28 = load <8 x i16>* %p28, align 16
- %29 = load <8 x i16>* %p29, align 16
- %30 = load <8 x i16>* %p30, align 16
- %31 = load <8 x i16>* %p31, align 16
- %32 = load <8 x i16>* %p32, align 16
- %33 = load <8 x i16>* %p33, align 16
+ %p1 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 1
+ %p2 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 2
+ %p3 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 3
+ %p4 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 4
+ %p5 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 5
+ %p6 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 6
+ %p7 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 7
+ %p8 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 8
+ %p9 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 9
+ %p10 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 10
+ %p11 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 11
+ %p12 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 12
+ %p13 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 13
+ %p14 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 14
+ %p15 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 15
+ %p16 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 16
+ %p17 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 17
+ %p18 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 18
+ %p19 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 19
+ %p20 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 20
+ %p21 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 21
+ %p22 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 22
+ %p23 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 23
+ %p24 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 24
+ %p25 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 25
+ %p26 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 26
+ %p27 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 27
+ %p28 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 28
+ %p29 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 29
+ %p30 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 30
+ %p31 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 31
+ %p32 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 32
+ %p33 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 33
+ %0 = load <8 x i16>, <8 x i16>* %p0, align 16
+ %1 = load <8 x i16>, <8 x i16>* %p1, align 16
+ %2 = load <8 x i16>, <8 x i16>* %p2, align 16
+ %3 = load <8 x i16>, <8 x i16>* %p3, align 16
+ %4 = load <8 x i16>, <8 x i16>* %p4, align 16
+ %5 = load <8 x i16>, <8 x i16>* %p5, align 16
+ %6 = load <8 x i16>, <8 x i16>* %p6, align 16
+ %7 = load <8 x i16>, <8 x i16>* %p7, align 16
+ %8 = load <8 x i16>, <8 x i16>* %p8, align 16
+ %9 = load <8 x i16>, <8 x i16>* %p9, align 16
+ %10 = load <8 x i16>, <8 x i16>* %p10, align 16
+ %11 = load <8 x i16>, <8 x i16>* %p11, align 16
+ %12 = load <8 x i16>, <8 x i16>* %p12, align 16
+ %13 = load <8 x i16>, <8 x i16>* %p13, align 16
+ %14 = load <8 x i16>, <8 x i16>* %p14, align 16
+ %15 = load <8 x i16>, <8 x i16>* %p15, align 16
+ %16 = load <8 x i16>, <8 x i16>* %p16, align 16
+ %17 = load <8 x i16>, <8 x i16>* %p17, align 16
+ %18 = load <8 x i16>, <8 x i16>* %p18, align 16
+ %19 = load <8 x i16>, <8 x i16>* %p19, align 16
+ %20 = load <8 x i16>, <8 x i16>* %p20, align 16
+ %21 = load <8 x i16>, <8 x i16>* %p21, align 16
+ %22 = load <8 x i16>, <8 x i16>* %p22, align 16
+ %23 = load <8 x i16>, <8 x i16>* %p23, align 16
+ %24 = load <8 x i16>, <8 x i16>* %p24, align 16
+ %25 = load <8 x i16>, <8 x i16>* %p25, align 16
+ %26 = load <8 x i16>, <8 x i16>* %p26, align 16
+ %27 = load <8 x i16>, <8 x i16>* %p27, align 16
+ %28 = load <8 x i16>, <8 x i16>* %p28, align 16
+ %29 = load <8 x i16>, <8 x i16>* %p29, align 16
+ %30 = load <8 x i16>, <8 x i16>* %p30, align 16
+ %31 = load <8 x i16>, <8 x i16>* %p31, align 16
+ %32 = load <8 x i16>, <8 x i16>* %p32, align 16
+ %33 = load <8 x i16>, <8 x i16>* %p33, align 16
%r1 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
%r2 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r1, <8 x i16> %2)
%r3 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r2, <8 x i16> %3)
@@ -304,73 +304,73 @@ declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
define i32 @test_i32(<4 x i32>* %p0, <4 x i32>* %q1) nounwind {
entry:
- %p1 = getelementptr <4 x i32>* %p0, i32 1
- %p2 = getelementptr <4 x i32>* %p0, i32 2
- %p3 = getelementptr <4 x i32>* %p0, i32 3
- %p4 = getelementptr <4 x i32>* %p0, i32 4
- %p5 = getelementptr <4 x i32>* %p0, i32 5
- %p6 = getelementptr <4 x i32>* %p0, i32 6
- %p7 = getelementptr <4 x i32>* %p0, i32 7
- %p8 = getelementptr <4 x i32>* %p0, i32 8
- %p9 = getelementptr <4 x i32>* %p0, i32 9
- %p10 = getelementptr <4 x i32>* %p0, i32 10
- %p11 = getelementptr <4 x i32>* %p0, i32 11
- %p12 = getelementptr <4 x i32>* %p0, i32 12
- %p13 = getelementptr <4 x i32>* %p0, i32 13
- %p14 = getelementptr <4 x i32>* %p0, i32 14
- %p15 = getelementptr <4 x i32>* %p0, i32 15
- %p16 = getelementptr <4 x i32>* %p0, i32 16
- %p17 = getelementptr <4 x i32>* %p0, i32 17
- %p18 = getelementptr <4 x i32>* %p0, i32 18
- %p19 = getelementptr <4 x i32>* %p0, i32 19
- %p20 = getelementptr <4 x i32>* %p0, i32 20
- %p21 = getelementptr <4 x i32>* %p0, i32 21
- %p22 = getelementptr <4 x i32>* %p0, i32 22
- %p23 = getelementptr <4 x i32>* %p0, i32 23
- %p24 = getelementptr <4 x i32>* %p0, i32 24
- %p25 = getelementptr <4 x i32>* %p0, i32 25
- %p26 = getelementptr <4 x i32>* %p0, i32 26
- %p27 = getelementptr <4 x i32>* %p0, i32 27
- %p28 = getelementptr <4 x i32>* %p0, i32 28
- %p29 = getelementptr <4 x i32>* %p0, i32 29
- %p30 = getelementptr <4 x i32>* %p0, i32 30
- %p31 = getelementptr <4 x i32>* %p0, i32 31
- %p32 = getelementptr <4 x i32>* %p0, i32 32
- %p33 = getelementptr <4 x i32>* %p0, i32 33
- %0 = load <4 x i32>* %p0, align 16
- %1 = load <4 x i32>* %p1, align 16
- %2 = load <4 x i32>* %p2, align 16
- %3 = load <4 x i32>* %p3, align 16
- %4 = load <4 x i32>* %p4, align 16
- %5 = load <4 x i32>* %p5, align 16
- %6 = load <4 x i32>* %p6, align 16
- %7 = load <4 x i32>* %p7, align 16
- %8 = load <4 x i32>* %p8, align 16
- %9 = load <4 x i32>* %p9, align 16
- %10 = load <4 x i32>* %p10, align 16
- %11 = load <4 x i32>* %p11, align 16
- %12 = load <4 x i32>* %p12, align 16
- %13 = load <4 x i32>* %p13, align 16
- %14 = load <4 x i32>* %p14, align 16
- %15 = load <4 x i32>* %p15, align 16
- %16 = load <4 x i32>* %p16, align 16
- %17 = load <4 x i32>* %p17, align 16
- %18 = load <4 x i32>* %p18, align 16
- %19 = load <4 x i32>* %p19, align 16
- %20 = load <4 x i32>* %p20, align 16
- %21 = load <4 x i32>* %p21, align 16
- %22 = load <4 x i32>* %p22, align 16
- %23 = load <4 x i32>* %p23, align 16
- %24 = load <4 x i32>* %p24, align 16
- %25 = load <4 x i32>* %p25, align 16
- %26 = load <4 x i32>* %p26, align 16
- %27 = load <4 x i32>* %p27, align 16
- %28 = load <4 x i32>* %p28, align 16
- %29 = load <4 x i32>* %p29, align 16
- %30 = load <4 x i32>* %p30, align 16
- %31 = load <4 x i32>* %p31, align 16
- %32 = load <4 x i32>* %p32, align 16
- %33 = load <4 x i32>* %p33, align 16
+ %p1 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 1
+ %p2 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 2
+ %p3 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 3
+ %p4 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 4
+ %p5 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 5
+ %p6 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 6
+ %p7 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 7
+ %p8 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 8
+ %p9 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 9
+ %p10 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 10
+ %p11 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 11
+ %p12 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 12
+ %p13 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 13
+ %p14 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 14
+ %p15 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 15
+ %p16 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 16
+ %p17 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 17
+ %p18 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 18
+ %p19 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 19
+ %p20 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 20
+ %p21 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 21
+ %p22 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 22
+ %p23 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 23
+ %p24 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 24
+ %p25 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 25
+ %p26 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 26
+ %p27 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 27
+ %p28 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 28
+ %p29 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 29
+ %p30 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 30
+ %p31 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 31
+ %p32 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 32
+ %p33 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 33
+ %0 = load <4 x i32>, <4 x i32>* %p0, align 16
+ %1 = load <4 x i32>, <4 x i32>* %p1, align 16
+ %2 = load <4 x i32>, <4 x i32>* %p2, align 16
+ %3 = load <4 x i32>, <4 x i32>* %p3, align 16
+ %4 = load <4 x i32>, <4 x i32>* %p4, align 16
+ %5 = load <4 x i32>, <4 x i32>* %p5, align 16
+ %6 = load <4 x i32>, <4 x i32>* %p6, align 16
+ %7 = load <4 x i32>, <4 x i32>* %p7, align 16
+ %8 = load <4 x i32>, <4 x i32>* %p8, align 16
+ %9 = load <4 x i32>, <4 x i32>* %p9, align 16
+ %10 = load <4 x i32>, <4 x i32>* %p10, align 16
+ %11 = load <4 x i32>, <4 x i32>* %p11, align 16
+ %12 = load <4 x i32>, <4 x i32>* %p12, align 16
+ %13 = load <4 x i32>, <4 x i32>* %p13, align 16
+ %14 = load <4 x i32>, <4 x i32>* %p14, align 16
+ %15 = load <4 x i32>, <4 x i32>* %p15, align 16
+ %16 = load <4 x i32>, <4 x i32>* %p16, align 16
+ %17 = load <4 x i32>, <4 x i32>* %p17, align 16
+ %18 = load <4 x i32>, <4 x i32>* %p18, align 16
+ %19 = load <4 x i32>, <4 x i32>* %p19, align 16
+ %20 = load <4 x i32>, <4 x i32>* %p20, align 16
+ %21 = load <4 x i32>, <4 x i32>* %p21, align 16
+ %22 = load <4 x i32>, <4 x i32>* %p22, align 16
+ %23 = load <4 x i32>, <4 x i32>* %p23, align 16
+ %24 = load <4 x i32>, <4 x i32>* %p24, align 16
+ %25 = load <4 x i32>, <4 x i32>* %p25, align 16
+ %26 = load <4 x i32>, <4 x i32>* %p26, align 16
+ %27 = load <4 x i32>, <4 x i32>* %p27, align 16
+ %28 = load <4 x i32>, <4 x i32>* %p28, align 16
+ %29 = load <4 x i32>, <4 x i32>* %p29, align 16
+ %30 = load <4 x i32>, <4 x i32>* %p30, align 16
+ %31 = load <4 x i32>, <4 x i32>* %p31, align 16
+ %32 = load <4 x i32>, <4 x i32>* %p32, align 16
+ %33 = load <4 x i32>, <4 x i32>* %p33, align 16
%r1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
%r2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r1, <4 x i32> %2)
%r3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r2, <4 x i32> %3)
@@ -453,73 +453,73 @@ declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
define i32 @test_i64(<2 x i64>* %p0, <2 x i64>* %q1) nounwind {
entry:
- %p1 = getelementptr <2 x i64>* %p0, i32 1
- %p2 = getelementptr <2 x i64>* %p0, i32 2
- %p3 = getelementptr <2 x i64>* %p0, i32 3
- %p4 = getelementptr <2 x i64>* %p0, i32 4
- %p5 = getelementptr <2 x i64>* %p0, i32 5
- %p6 = getelementptr <2 x i64>* %p0, i32 6
- %p7 = getelementptr <2 x i64>* %p0, i32 7
- %p8 = getelementptr <2 x i64>* %p0, i32 8
- %p9 = getelementptr <2 x i64>* %p0, i32 9
- %p10 = getelementptr <2 x i64>* %p0, i32 10
- %p11 = getelementptr <2 x i64>* %p0, i32 11
- %p12 = getelementptr <2 x i64>* %p0, i32 12
- %p13 = getelementptr <2 x i64>* %p0, i32 13
- %p14 = getelementptr <2 x i64>* %p0, i32 14
- %p15 = getelementptr <2 x i64>* %p0, i32 15
- %p16 = getelementptr <2 x i64>* %p0, i32 16
- %p17 = getelementptr <2 x i64>* %p0, i32 17
- %p18 = getelementptr <2 x i64>* %p0, i32 18
- %p19 = getelementptr <2 x i64>* %p0, i32 19
- %p20 = getelementptr <2 x i64>* %p0, i32 20
- %p21 = getelementptr <2 x i64>* %p0, i32 21
- %p22 = getelementptr <2 x i64>* %p0, i32 22
- %p23 = getelementptr <2 x i64>* %p0, i32 23
- %p24 = getelementptr <2 x i64>* %p0, i32 24
- %p25 = getelementptr <2 x i64>* %p0, i32 25
- %p26 = getelementptr <2 x i64>* %p0, i32 26
- %p27 = getelementptr <2 x i64>* %p0, i32 27
- %p28 = getelementptr <2 x i64>* %p0, i32 28
- %p29 = getelementptr <2 x i64>* %p0, i32 29
- %p30 = getelementptr <2 x i64>* %p0, i32 30
- %p31 = getelementptr <2 x i64>* %p0, i32 31
- %p32 = getelementptr <2 x i64>* %p0, i32 32
- %p33 = getelementptr <2 x i64>* %p0, i32 33
- %0 = load <2 x i64>* %p0, align 16
- %1 = load <2 x i64>* %p1, align 16
- %2 = load <2 x i64>* %p2, align 16
- %3 = load <2 x i64>* %p3, align 16
- %4 = load <2 x i64>* %p4, align 16
- %5 = load <2 x i64>* %p5, align 16
- %6 = load <2 x i64>* %p6, align 16
- %7 = load <2 x i64>* %p7, align 16
- %8 = load <2 x i64>* %p8, align 16
- %9 = load <2 x i64>* %p9, align 16
- %10 = load <2 x i64>* %p10, align 16
- %11 = load <2 x i64>* %p11, align 16
- %12 = load <2 x i64>* %p12, align 16
- %13 = load <2 x i64>* %p13, align 16
- %14 = load <2 x i64>* %p14, align 16
- %15 = load <2 x i64>* %p15, align 16
- %16 = load <2 x i64>* %p16, align 16
- %17 = load <2 x i64>* %p17, align 16
- %18 = load <2 x i64>* %p18, align 16
- %19 = load <2 x i64>* %p19, align 16
- %20 = load <2 x i64>* %p20, align 16
- %21 = load <2 x i64>* %p21, align 16
- %22 = load <2 x i64>* %p22, align 16
- %23 = load <2 x i64>* %p23, align 16
- %24 = load <2 x i64>* %p24, align 16
- %25 = load <2 x i64>* %p25, align 16
- %26 = load <2 x i64>* %p26, align 16
- %27 = load <2 x i64>* %p27, align 16
- %28 = load <2 x i64>* %p28, align 16
- %29 = load <2 x i64>* %p29, align 16
- %30 = load <2 x i64>* %p30, align 16
- %31 = load <2 x i64>* %p31, align 16
- %32 = load <2 x i64>* %p32, align 16
- %33 = load <2 x i64>* %p33, align 16
+ %p1 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 1
+ %p2 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 2
+ %p3 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 3
+ %p4 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 4
+ %p5 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 5
+ %p6 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 6
+ %p7 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 7
+ %p8 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 8
+ %p9 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 9
+ %p10 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 10
+ %p11 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 11
+ %p12 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 12
+ %p13 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 13
+ %p14 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 14
+ %p15 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 15
+ %p16 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 16
+ %p17 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 17
+ %p18 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 18
+ %p19 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 19
+ %p20 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 20
+ %p21 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 21
+ %p22 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 22
+ %p23 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 23
+ %p24 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 24
+ %p25 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 25
+ %p26 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 26
+ %p27 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 27
+ %p28 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 28
+ %p29 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 29
+ %p30 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 30
+ %p31 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 31
+ %p32 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 32
+ %p33 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 33
+ %0 = load <2 x i64>, <2 x i64>* %p0, align 16
+ %1 = load <2 x i64>, <2 x i64>* %p1, align 16
+ %2 = load <2 x i64>, <2 x i64>* %p2, align 16
+ %3 = load <2 x i64>, <2 x i64>* %p3, align 16
+ %4 = load <2 x i64>, <2 x i64>* %p4, align 16
+ %5 = load <2 x i64>, <2 x i64>* %p5, align 16
+ %6 = load <2 x i64>, <2 x i64>* %p6, align 16
+ %7 = load <2 x i64>, <2 x i64>* %p7, align 16
+ %8 = load <2 x i64>, <2 x i64>* %p8, align 16
+ %9 = load <2 x i64>, <2 x i64>* %p9, align 16
+ %10 = load <2 x i64>, <2 x i64>* %p10, align 16
+ %11 = load <2 x i64>, <2 x i64>* %p11, align 16
+ %12 = load <2 x i64>, <2 x i64>* %p12, align 16
+ %13 = load <2 x i64>, <2 x i64>* %p13, align 16
+ %14 = load <2 x i64>, <2 x i64>* %p14, align 16
+ %15 = load <2 x i64>, <2 x i64>* %p15, align 16
+ %16 = load <2 x i64>, <2 x i64>* %p16, align 16
+ %17 = load <2 x i64>, <2 x i64>* %p17, align 16
+ %18 = load <2 x i64>, <2 x i64>* %p18, align 16
+ %19 = load <2 x i64>, <2 x i64>* %p19, align 16
+ %20 = load <2 x i64>, <2 x i64>* %p20, align 16
+ %21 = load <2 x i64>, <2 x i64>* %p21, align 16
+ %22 = load <2 x i64>, <2 x i64>* %p22, align 16
+ %23 = load <2 x i64>, <2 x i64>* %p23, align 16
+ %24 = load <2 x i64>, <2 x i64>* %p24, align 16
+ %25 = load <2 x i64>, <2 x i64>* %p25, align 16
+ %26 = load <2 x i64>, <2 x i64>* %p26, align 16
+ %27 = load <2 x i64>, <2 x i64>* %p27, align 16
+ %28 = load <2 x i64>, <2 x i64>* %p28, align 16
+ %29 = load <2 x i64>, <2 x i64>* %p29, align 16
+ %30 = load <2 x i64>, <2 x i64>* %p30, align 16
+ %31 = load <2 x i64>, <2 x i64>* %p31, align 16
+ %32 = load <2 x i64>, <2 x i64>* %p32, align 16
+ %33 = load <2 x i64>, <2 x i64>* %p33, align 16
%r1 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
%r2 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r1, <2 x i64> %2)
%r3 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r2, <2 x i64> %3)
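`getelementptr` receives the same treatment throughout spill.ll: the pointee type becomes an explicit first operand rather than being derived from the pointer operand's type. A minimal sketch, assuming a hypothetical `%p0` of type `<2 x i64>*`:

  %p1 = getelementptr <2 x i64>* %p0, i32 1             ; old form
  %p1 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 1  ; new form

As with `load`, the offset operands and the resulting addresses are unchanged, so the spill/reload behavior under test is unaffected.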
diff --git a/test/CodeGen/Mips/msa/vec.ll b/test/CodeGen/Mips/msa/vec.ll
index d5b97f5..8790923 100644
--- a/test/CodeGen/Mips/msa/vec.ll
+++ b/test/CodeGen/Mips/msa/vec.ll
@@ -9,8 +9,8 @@
define void @llvm_mips_and_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
%2 = bitcast <16 x i8> %0 to <16 x i8>
%3 = bitcast <16 x i8> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
@@ -32,8 +32,8 @@ entry:
define void @llvm_mips_and_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
%2 = bitcast <8 x i16> %0 to <16 x i8>
%3 = bitcast <8 x i16> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
@@ -55,8 +55,8 @@ entry:
define void @llvm_mips_and_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
%2 = bitcast <4 x i32> %0 to <16 x i8>
%3 = bitcast <4 x i32> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
@@ -78,8 +78,8 @@ entry:
define void @llvm_mips_and_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
%2 = bitcast <2 x i64> %0 to <16 x i8>
%3 = bitcast <2 x i64> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
@@ -97,8 +97,8 @@ entry:
;
define void @and_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
%2 = and <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
ret void
@@ -113,8 +113,8 @@ entry:
;
define void @and_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
%2 = and <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
ret void
@@ -130,8 +130,8 @@ entry:
define void @and_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
%2 = and <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
ret void
@@ -147,8 +147,8 @@ entry:
define void @and_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
%2 = and <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
ret void
@@ -168,9 +168,9 @@ entry:
define void @llvm_mips_bmnz_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
%3 = bitcast <16 x i8> %0 to <16 x i8>
%4 = bitcast <16 x i8> %1 to <16 x i8>
%5 = bitcast <16 x i8> %2 to <16 x i8>
@@ -198,9 +198,9 @@ entry:
define void @llvm_mips_bmnz_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
%3 = bitcast <8 x i16> %0 to <16 x i8>
%4 = bitcast <8 x i16> %1 to <16 x i8>
%5 = bitcast <8 x i16> %2 to <16 x i8>
@@ -228,9 +228,9 @@ entry:
define void @llvm_mips_bmnz_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
%3 = bitcast <4 x i32> %0 to <16 x i8>
%4 = bitcast <4 x i32> %1 to <16 x i8>
%5 = bitcast <4 x i32> %2 to <16 x i8>
@@ -258,9 +258,9 @@ entry:
define void @llvm_mips_bmnz_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
%3 = bitcast <2 x i64> %0 to <16 x i8>
%4 = bitcast <2 x i64> %1 to <16 x i8>
%5 = bitcast <2 x i64> %2 to <16 x i8>
@@ -288,9 +288,9 @@ entry:
define void @llvm_mips_bmz_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG3
%3 = bitcast <16 x i8> %0 to <16 x i8>
%4 = bitcast <16 x i8> %1 to <16 x i8>
%5 = bitcast <16 x i8> %2 to <16 x i8>
@@ -319,9 +319,9 @@ entry:
define void @llvm_mips_bmz_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG3
%3 = bitcast <8 x i16> %0 to <16 x i8>
%4 = bitcast <8 x i16> %1 to <16 x i8>
%5 = bitcast <8 x i16> %2 to <16 x i8>
@@ -350,9 +350,9 @@ entry:
define void @llvm_mips_bmz_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG3
%3 = bitcast <4 x i32> %0 to <16 x i8>
%4 = bitcast <4 x i32> %1 to <16 x i8>
%5 = bitcast <4 x i32> %2 to <16 x i8>
@@ -381,9 +381,9 @@ entry:
define void @llvm_mips_bmz_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG3
%3 = bitcast <2 x i64> %0 to <16 x i8>
%4 = bitcast <2 x i64> %1 to <16 x i8>
%5 = bitcast <2 x i64> %2 to <16 x i8>
@@ -412,9 +412,9 @@ entry:
define void @llvm_mips_bsel_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
- %2 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG3
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG2
+ %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG3
%3 = bitcast <16 x i8> %0 to <16 x i8>
%4 = bitcast <16 x i8> %1 to <16 x i8>
%5 = bitcast <16 x i8> %2 to <16 x i8>
@@ -443,9 +443,9 @@ entry:
define void @llvm_mips_bsel_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
- %2 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG3
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG2
+ %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG3
%3 = bitcast <8 x i16> %0 to <16 x i8>
%4 = bitcast <8 x i16> %1 to <16 x i8>
%5 = bitcast <8 x i16> %2 to <16 x i8>
@@ -474,9 +474,9 @@ entry:
define void @llvm_mips_bsel_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
- %2 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG3
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG2
+ %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG3
%3 = bitcast <4 x i32> %0 to <16 x i8>
%4 = bitcast <4 x i32> %1 to <16 x i8>
%5 = bitcast <4 x i32> %2 to <16 x i8>
@@ -505,9 +505,9 @@ entry:
define void @llvm_mips_bsel_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
- %2 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG3
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG2
+ %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG3
%3 = bitcast <2 x i64> %0 to <16 x i8>
%4 = bitcast <2 x i64> %1 to <16 x i8>
%5 = bitcast <2 x i64> %2 to <16 x i8>
@@ -535,8 +535,8 @@ entry:
define void @llvm_mips_nor_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG2
%2 = bitcast <16 x i8> %0 to <16 x i8>
%3 = bitcast <16 x i8> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
@@ -558,8 +558,8 @@ entry:
define void @llvm_mips_nor_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG2
%2 = bitcast <8 x i16> %0 to <16 x i8>
%3 = bitcast <8 x i16> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
@@ -581,8 +581,8 @@ entry:
define void @llvm_mips_nor_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG2
%2 = bitcast <4 x i32> %0 to <16 x i8>
%3 = bitcast <4 x i32> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
@@ -604,8 +604,8 @@ entry:
define void @llvm_mips_nor_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG2
%2 = bitcast <2 x i64> %0 to <16 x i8>
%3 = bitcast <2 x i64> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
@@ -627,8 +627,8 @@ entry:
define void @llvm_mips_or_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
%2 = bitcast <16 x i8> %0 to <16 x i8>
%3 = bitcast <16 x i8> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
@@ -650,8 +650,8 @@ entry:
define void @llvm_mips_or_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
%2 = bitcast <8 x i16> %0 to <16 x i8>
%3 = bitcast <8 x i16> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
@@ -673,8 +673,8 @@ entry:
define void @llvm_mips_or_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
%2 = bitcast <4 x i32> %0 to <16 x i8>
%3 = bitcast <4 x i32> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
@@ -696,8 +696,8 @@ entry:
define void @llvm_mips_or_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
%2 = bitcast <2 x i64> %0 to <16 x i8>
%3 = bitcast <2 x i64> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
@@ -715,8 +715,8 @@ entry:
;
define void @or_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
%2 = or <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
ret void
@@ -731,8 +731,8 @@ entry:
;
define void @or_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
%2 = or <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
ret void
@@ -748,8 +748,8 @@ entry:
define void @or_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
%2 = or <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
ret void
@@ -765,8 +765,8 @@ entry:
define void @or_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
%2 = or <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
ret void
@@ -785,8 +785,8 @@ entry:
define void @llvm_mips_xor_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
%2 = bitcast <16 x i8> %0 to <16 x i8>
%3 = bitcast <16 x i8> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
@@ -808,8 +808,8 @@ entry:
define void @llvm_mips_xor_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
%2 = bitcast <8 x i16> %0 to <16 x i8>
%3 = bitcast <8 x i16> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
@@ -831,8 +831,8 @@ entry:
define void @llvm_mips_xor_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
%2 = bitcast <4 x i32> %0 to <16 x i8>
%3 = bitcast <4 x i32> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
@@ -854,8 +854,8 @@ entry:
define void @llvm_mips_xor_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
%2 = bitcast <2 x i64> %0 to <16 x i8>
%3 = bitcast <2 x i64> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
@@ -873,8 +873,8 @@ entry:
;
define void @xor_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
- %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
%2 = xor <16 x i8> %0, %1
store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
ret void
@@ -889,8 +889,8 @@ entry:
;
define void @xor_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
- %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
%2 = xor <8 x i16> %0, %1
store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
ret void
@@ -906,8 +906,8 @@ entry:
define void @xor_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
- %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
%2 = xor <4 x i32> %0, %1
store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
ret void
@@ -923,8 +923,8 @@ entry:
define void @xor_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
- %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
%2 = xor <2 x i64> %0, %1
store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
ret void
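Putting the pieces together, a complete vec.ll test in the new syntax reads as follows; this sketch is assembled verbatim from the xor_v_d_test hunk above:

define void @xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
  %2 = xor <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
  ret void
}

Only the two `load` lines differ from the old file; `xor`, `store`, and `ret` already carry explicit types and pass through the conversion untouched.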
diff --git a/test/CodeGen/Mips/msa/vecs10.ll b/test/CodeGen/Mips/msa/vecs10.ll
index e22e075..f442f77 100644
--- a/test/CodeGen/Mips/msa/vecs10.ll
+++ b/test/CodeGen/Mips/msa/vecs10.ll
@@ -7,7 +7,7 @@
define i32 @llvm_mips_bnz_v_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bnz_v_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_v_ARG1
%1 = tail call i32 @llvm.mips.bnz.v(<16 x i8> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
@@ -28,7 +28,7 @@ declare i32 @llvm.mips.bnz.v(<16 x i8>) nounwind
define i32 @llvm_mips_bz_v_test() nounwind {
entry:
- %0 = load <16 x i8>* @llvm_mips_bz_v_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bz_v_ARG1
%1 = tail call i32 @llvm.mips.bz.v(<16 x i8> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false