Diffstat (limited to 'test/CodeGen/Mips/msa/3r-d.ll')
-rw-r--r-- | test/CodeGen/Mips/msa/3r-d.ll | 88
1 files changed, 44 insertions, 44 deletions
diff --git a/test/CodeGen/Mips/msa/3r-d.ll b/test/CodeGen/Mips/msa/3r-d.ll
index 0099554..4fc32b7 100644
--- a/test/CodeGen/Mips/msa/3r-d.ll
+++ b/test/CodeGen/Mips/msa/3r-d.ll
@@ -10,8 +10,8 @@
 
 define void @llvm_mips_div_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
   ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_div_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
   ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_div_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
   ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_div_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
   ret void
@@ -95,8 +95,8 @@ declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @div_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
   %2 = sdiv <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
   ret void
@@ -111,8 +111,8 @@ entry:
 
 define void @div_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
   %2 = sdiv <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
   ret void
@@ -127,8 +127,8 @@ entry:
 
 define void @div_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
   %2 = sdiv <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
   ret void
@@ -143,8 +143,8 @@ entry:
 
 define void @div_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
   %2 = sdiv <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
   ret void
@@ -163,8 +163,8 @@ entry:
 
 define void @llvm_mips_div_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
   ret void
@@ -185,8 +185,8 @@ declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_div_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
   ret void
@@ -207,8 +207,8 @@ declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_div_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
   ret void
@@ -229,8 +229,8 @@ declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_div_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
   ret void
@@ -248,8 +248,8 @@ declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @div_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
   %2 = udiv <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
   ret void
@@ -264,8 +264,8 @@ entry:
 
 define void @div_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
   %2 = udiv <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
   ret void
@@ -280,8 +280,8 @@ entry:
 
 define void @div_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
   %2 = udiv <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
   ret void
@@ -296,8 +296,8 @@ entry:
 
 define void @div_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
   %2 = udiv <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
   ret void
@@ -326,8 +326,8 @@ entry:
 
 define void @llvm_mips_dotp_s_h_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG1
-  %1 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.dotp.s.h(<16 x i8> %0, <16 x i8> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_s_h_RES
   ret void
@@ -353,8 +353,8 @@ declare <8 x i16> @llvm.mips.dotp.s.h(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dotp_s_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG1
-  %1 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.dotp.s.w(<8 x i16> %0, <8 x i16> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_s_w_RES
   ret void
@@ -377,8 +377,8 @@ declare <4 x i32> @llvm.mips.dotp.s.w(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dotp_s_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG1
-  %1 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.dotp.s.d(<4 x i32> %0, <4 x i32> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_s_d_RES
   ret void
@@ -409,8 +409,8 @@ declare <2 x i64> @llvm.mips.dotp.s.d(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dotp_u_h_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG1
-  %1 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.dotp.u.h(<16 x i8> %0, <16 x i8> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_u_h_RES
   ret void
@@ -436,8 +436,8 @@ declare <8 x i16> @llvm.mips.dotp.u.h(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dotp_u_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG1
-  %1 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.dotp.u.w(<8 x i16> %0, <8 x i16> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_u_w_RES
   ret void
@@ -460,8 +460,8 @@ declare <4 x i32> @llvm.mips.dotp.u.w(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dotp_u_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG1
-  %1 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.dotp.u.d(<4 x i32> %0, <4 x i32> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_u_d_RES
   ret void
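Every hunk in this diff applies the same mechanical rewrite: the textual IR for a load now spells the loaded value type explicitly, in addition to the pointer operand's type. A minimal standalone sketch of the two spellings follows; the global @g and function @load_example are illustrative names, not taken from the test file.

@g = global <16 x i8> zeroinitializer, align 16

define <16 x i8> @load_example() nounwind {
entry:
  ; old spelling:  %v = load <16 x i8>* @g
  ; new spelling: the result type <16 x i8> is written before the pointer operand
  %v = load <16 x i8>, <16 x i8>* @g, align 16
  ret <16 x i8> %v
}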