author | Stephen Hines <srhines@google.com> | 2014-04-23 16:57:46 -0700 |
---|---|---|
committer | Stephen Hines <srhines@google.com> | 2014-04-24 15:53:16 -0700 |
commit | 36b56886974eae4f9c5ebc96befd3e7bfe5de338 (patch) | |
tree | e6cfb69fbbd937f450eeb83bfb83b9da3b01275a /test/CodeGen/Mips/msa | |
parent | 69a8640022b04415ae9fac62f8ab090601d8f889 (diff) | |
Update to LLVM 3.5a.
Change-Id: Ifadecab779f128e62e430c2b4f6ddd84953ed617
Diffstat (limited to 'test/CodeGen/Mips/msa')
-rw-r--r-- | test/CodeGen/Mips/msa/2r_vector_scalar.ll | 69
-rw-r--r-- | test/CodeGen/Mips/msa/3r-s.ll | 86
-rw-r--r-- | test/CodeGen/Mips/msa/arithmetic_float.ll | 3
-rw-r--r-- | test/CodeGen/Mips/msa/basic_operations.ll | 30
-rw-r--r-- | test/CodeGen/Mips/msa/basic_operations_float.ll | 45
-rw-r--r-- | test/CodeGen/Mips/msa/bitwise.ll | 5
-rw-r--r-- | test/CodeGen/Mips/msa/compare.ll | 34
-rw-r--r-- | test/CodeGen/Mips/msa/compare_float.ll | 28
-rw-r--r-- | test/CodeGen/Mips/msa/elm_copy.ll | 136
-rw-r--r-- | test/CodeGen/Mips/msa/elm_insv.ll | 138
-rw-r--r-- | test/CodeGen/Mips/msa/elm_shift_slide.ll | 32
-rw-r--r-- | test/CodeGen/Mips/msa/frameindex.ll | 309
-rw-r--r-- | test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll | 2
-rw-r--r-- | test/CodeGen/Mips/msa/shift-dagcombine.ll | 3
-rw-r--r-- | test/CodeGen/Mips/msa/shuffle.ll | 46
-rw-r--r-- | test/CodeGen/Mips/msa/special.ll | 40
-rw-r--r-- | test/CodeGen/Mips/msa/vec.ll | 168 |
17 files changed, 835 insertions, 339 deletions
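Much of this change converts the MSA intrinsic tests from a single CHECK prefix to a MIPS-ANY/MIPS32/MIPS64 multi-prefix scheme, so one test body can carry both 32-bit and 64-bit expectations. Below is a minimal sketch of that pattern, not part of the commit: the global names @a/@b and the function name are made up, and the check lines only mirror the GOT-access and ld.b/st.b idioms used in the diff.

```llvm
; Shared checks use the MIPS-ANY prefix; width-specific checks use MIPS32 or MIPS64.
; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \
; RUN:     FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32
; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
; RUN:     FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64

@a = global <16 x i8> zeroinitializer, align 16
@b = global <16 x i8> zeroinitializer, align 16

define void @copy_v16i8() nounwind {
entry:
  %0 = load <16 x i8>* @a
  store <16 x i8> %0, <16 x i8>* @b
  ret void
}

; Only the GOT access differs by pointer width; the MSA vector load/store does not.
; MIPS-ANY: copy_v16i8:
; MIPS32-DAG: lw [[RA:\$[0-9]+]], %got(a)
; MIPS64-DAG: ld [[RA:\$[0-9]+]], %got_disp(a)
; MIPS-ANY-DAG: ld.b [[W:\$w[0-9]+]], 0([[RA]])
; MIPS-ANY-DAG: st.b [[W]],
; MIPS-ANY: .size copy_v16i8
```

In the tests below, checks that hold on both targets (e.g. the insert.b path in elm_insv.ll) keep plain MIPS-ANY lines, while target-specific differences (fill.d on MIPS64 versus the insert.w sequence on MIPS32 in 2r_vector_scalar.ll) get MIPS64 and MIPS32 lines respectively.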
diff --git a/test/CodeGen/Mips/msa/2r_vector_scalar.ll b/test/CodeGen/Mips/msa/2r_vector_scalar.ll index 6f6e1b9..64e459e 100644 --- a/test/CodeGen/Mips/msa/2r_vector_scalar.ll +++ b/test/CodeGen/Mips/msa/2r_vector_scalar.ll @@ -1,8 +1,14 @@ ; Test the MSA intrinsics that are encoded with the 2R instruction format and ; convert scalars to vectors. -; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s -; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s +; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32 +; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32 +; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64 +; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64 @llvm_mips_fill_b_ARG1 = global i32 23, align 16 @llvm_mips_fill_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 @@ -17,11 +23,12 @@ entry: declare <16 x i8> @llvm.mips.fill.b(i32) nounwind -; CHECK: llvm_mips_fill_b_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], -; CHECK-DAG: fill.b [[R2:\$w[0-9]+]], [[R1]] -; CHECK-DAG: st.b [[R2]], -; CHECK: .size llvm_mips_fill_b_test +; MIPS-ANY: llvm_mips_fill_b_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], +; MIPS64-DAG: ld [[R1:\$[0-9]+]], +; MIPS-ANY-DAG: fill.b [[R2:\$w[0-9]+]], [[R1]] +; MIPS-ANY-DAG: st.b [[R2]], +; MIPS-ANY: .size llvm_mips_fill_b_test ; @llvm_mips_fill_h_ARG1 = global i32 23, align 16 @llvm_mips_fill_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 @@ -36,11 +43,12 @@ entry: declare <8 x i16> @llvm.mips.fill.h(i32) nounwind -; CHECK: llvm_mips_fill_h_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], -; CHECK-DAG: fill.h [[R2:\$w[0-9]+]], [[R1]] -; CHECK-DAG: st.h [[R2]], -; CHECK: .size llvm_mips_fill_h_test +; MIPS-ANY: llvm_mips_fill_h_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], +; MIPS64-DAG: ld [[R1:\$[0-9]+]], +; MIPS-ANY-DAG: fill.h [[R2:\$w[0-9]+]], [[R1]] +; MIPS-ANY-DAG: st.h [[R2]], +; MIPS-ANY: .size llvm_mips_fill_h_test ; @llvm_mips_fill_w_ARG1 = global i32 23, align 16 @llvm_mips_fill_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 @@ -55,11 +63,12 @@ entry: declare <4 x i32> @llvm.mips.fill.w(i32) nounwind -; CHECK: llvm_mips_fill_w_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], -; CHECK-DAG: fill.w [[R2:\$w[0-9]+]], [[R1]] -; CHECK-DAG: st.w [[R2]], -; CHECK: .size llvm_mips_fill_w_test +; MIPS-ANY: llvm_mips_fill_w_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], +; MIPS64-DAG: ld [[R1:\$[0-9]+]], +; MIPS-ANY-DAG: fill.w [[R2:\$w[0-9]+]], [[R1]] +; MIPS-ANY-DAG: st.w [[R2]], +; MIPS-ANY: .size llvm_mips_fill_w_test ; @llvm_mips_fill_d_ARG1 = global i64 23, align 16 @llvm_mips_fill_d_RES = global <2 x i64> <i64 0, i64 0>, align 16 @@ -74,14 +83,18 @@ entry: declare <2 x i64> @llvm.mips.fill.d(i64) nounwind -; CHECK: llvm_mips_fill_d_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], 0( -; CHECK-DAG: lw [[R2:\$[0-9]+]], 4( -; CHECK-DAG: ldi.b [[R3:\$w[0-9]+]], 0 -; CHECK-DAG: insert.w [[R3]][0], [[R1]] -; CHECK-DAG: insert.w [[R3]][1], [[R2]] -; CHECK-DAG: insert.w [[R3]][2], [[R1]] -; CHECK-DAG: insert.w [[R3]][3], [[R2]] -; CHECK-DAG: st.w [[R3]], -; CHECK: .size llvm_mips_fill_d_test -; +; MIPS-ANY: llvm_mips_fill_d_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], 0( 
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], 4( +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_fill_d_ARG1) +; MIPS32-DAG: ldi.b [[R3:\$w[0-9]+]], 0 +; MIPS32-DAG: insert.w [[R3]][0], [[R1]] +; MIPS32-DAG: insert.w [[R3]][1], [[R2]] +; MIPS32-DAG: insert.w [[R3]][2], [[R1]] +; MIPS32-DAG: insert.w [[R3]][3], [[R2]] +; MIPS64-DAG: fill.d [[WD:\$w[0-9]+]], [[R1]] +; MIPS32-DAG: st.w [[R3]], +; MIPS64-DAG: ld [[RD:\$[0-9]+]], %got_disp(llvm_mips_fill_d_RES) +; MIPS64-DAG: st.d [[WD]], 0([[RD]]) +; MIPS-ANY: .size llvm_mips_fill_d_test +;
\ No newline at end of file diff --git a/test/CodeGen/Mips/msa/3r-s.ll b/test/CodeGen/Mips/msa/3r-s.ll index 30cf265..581c3bf 100644 --- a/test/CodeGen/Mips/msa/3r-s.ll +++ b/test/CodeGen/Mips/msa/3r-s.ll @@ -5,98 +5,114 @@ ; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s @llvm_mips_sld_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 -@llvm_mips_sld_b_ARG2 = global i32 10, align 16 +@llvm_mips_sld_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 +@llvm_mips_sld_b_ARG3 = global i32 10, align 16 @llvm_mips_sld_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 define void @llvm_mips_sld_b_test() nounwind { entry: %0 = load <16 x i8>* @llvm_mips_sld_b_ARG1 - %1 = load i32* @llvm_mips_sld_b_ARG2 - %2 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, i32 %1) - store <16 x i8> %2, <16 x i8>* @llvm_mips_sld_b_RES + %1 = load <16 x i8>* @llvm_mips_sld_b_ARG2 + %2 = load i32* @llvm_mips_sld_b_ARG3 + %3 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, <16 x i8> %1, i32 %2) + store <16 x i8> %3, <16 x i8>* @llvm_mips_sld_b_RES ret void } -declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, i32) nounwind +declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, <16 x i8>, i32) nounwind ; CHECK: llvm_mips_sld_b_test: ; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_b_ARG1) ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_b_ARG2) -; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]]) -; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]]) -; CHECK-DAG: sld.b [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}} +; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_sld_b_ARG3) +; CHECK-DAG: ld.b [[WD:\$w[0-9]+]], 0([[R1]]) +; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R2]]) +; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R3]]) +; CHECK-DAG: sld.b [[WD]], [[WS]]{{\[}}[[RT]]{{\]}} ; CHECK-DAG: st.b [[WD]] ; CHECK: .size llvm_mips_sld_b_test ; @llvm_mips_sld_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 -@llvm_mips_sld_h_ARG2 = global i32 10, align 16 +@llvm_mips_sld_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 +@llvm_mips_sld_h_ARG3 = global i32 10, align 16 @llvm_mips_sld_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 define void @llvm_mips_sld_h_test() nounwind { entry: %0 = load <8 x i16>* @llvm_mips_sld_h_ARG1 - %1 = load i32* @llvm_mips_sld_h_ARG2 - %2 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, i32 %1) - store <8 x i16> %2, <8 x i16>* @llvm_mips_sld_h_RES + %1 = load <8 x i16>* @llvm_mips_sld_h_ARG2 + %2 = load i32* @llvm_mips_sld_h_ARG3 + %3 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, <8 x i16> %1, i32 %2) + store <8 x i16> %3, <8 x i16>* @llvm_mips_sld_h_RES ret void } -declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, i32) nounwind +declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, <8 x i16>, i32) nounwind ; CHECK: llvm_mips_sld_h_test: ; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_h_ARG1) -; CHECK-DAG: lw [[RT:\$[0-9]+]], %got(llvm_mips_sld_h_ARG2) -; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]]) -; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]]) -; CHECK-DAG: sld.h [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}} +; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_h_ARG2) +; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_sld_h_ARG3) +; 
CHECK-DAG: ld.h [[WD:\$w[0-9]+]], 0([[R1]]) +; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R2]]) +; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R3]]) +; CHECK-DAG: sld.h [[WD]], [[WS]]{{\[}}[[RT]]{{\]}} ; CHECK-DAG: st.h [[WD]] ; CHECK: .size llvm_mips_sld_h_test ; @llvm_mips_sld_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 -@llvm_mips_sld_w_ARG2 = global i32 10, align 16 +@llvm_mips_sld_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 +@llvm_mips_sld_w_ARG3 = global i32 10, align 16 @llvm_mips_sld_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 define void @llvm_mips_sld_w_test() nounwind { entry: %0 = load <4 x i32>* @llvm_mips_sld_w_ARG1 - %1 = load i32* @llvm_mips_sld_w_ARG2 - %2 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, i32 %1) - store <4 x i32> %2, <4 x i32>* @llvm_mips_sld_w_RES + %1 = load <4 x i32>* @llvm_mips_sld_w_ARG2 + %2 = load i32* @llvm_mips_sld_w_ARG3 + %3 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, <4 x i32> %1, i32 %2) + store <4 x i32> %3, <4 x i32>* @llvm_mips_sld_w_RES ret void } -declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, i32) nounwind +declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, <4 x i32>, i32) nounwind ; CHECK: llvm_mips_sld_w_test: ; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_w_ARG1) -; CHECK-DAG: lw [[RT:\$[0-9]+]], %got(llvm_mips_sld_w_ARG2) -; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]]) -; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]]) -; CHECK-DAG: sld.w [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}} +; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_w_ARG2) +; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_sld_w_ARG3) +; CHECK-DAG: ld.w [[WD:\$w[0-9]+]], 0([[R1]]) +; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R2]]) +; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R3]]) +; CHECK-DAG: sld.w [[WD]], [[WS]]{{\[}}[[RT]]{{\]}} ; CHECK-DAG: st.w [[WD]] ; CHECK: .size llvm_mips_sld_w_test ; @llvm_mips_sld_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 -@llvm_mips_sld_d_ARG2 = global i32 10, align 16 +@llvm_mips_sld_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16 +@llvm_mips_sld_d_ARG3 = global i32 10, align 16 @llvm_mips_sld_d_RES = global <2 x i64> <i64 0, i64 0>, align 16 define void @llvm_mips_sld_d_test() nounwind { entry: %0 = load <2 x i64>* @llvm_mips_sld_d_ARG1 - %1 = load i32* @llvm_mips_sld_d_ARG2 - %2 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, i32 %1) - store <2 x i64> %2, <2 x i64>* @llvm_mips_sld_d_RES + %1 = load <2 x i64>* @llvm_mips_sld_d_ARG2 + %2 = load i32* @llvm_mips_sld_d_ARG3 + %3 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, <2 x i64> %1, i32 %2) + store <2 x i64> %3, <2 x i64>* @llvm_mips_sld_d_RES ret void } -declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, i32) nounwind +declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, <2 x i64>, i32) nounwind ; CHECK: llvm_mips_sld_d_test: ; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_d_ARG1) -; CHECK-DAG: lw [[RT:\$[0-9]+]], %got(llvm_mips_sld_d_ARG2) -; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]]) -; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]]) -; CHECK-DAG: sld.d [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}} +; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_d_ARG2) +; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_sld_d_ARG3) +; CHECK-DAG: ld.d [[WD:\$w[0-9]+]], 0([[R1]]) +; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R2]]) +; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R3]]) +; CHECK-DAG: sld.d [[WD]], [[WS]]{{\[}}[[RT]]{{\]}} ; CHECK-DAG: st.d [[WD]] ; CHECK: .size llvm_mips_sld_d_test ; diff --git a/test/CodeGen/Mips/msa/arithmetic_float.ll 
b/test/CodeGen/Mips/msa/arithmetic_float.ll index dc38721..86e57ac 100644 --- a/test/CodeGen/Mips/msa/arithmetic_float.ll +++ b/test/CodeGen/Mips/msa/arithmetic_float.ll @@ -295,7 +295,8 @@ define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind { ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1) %3 = fmul <2 x double> <double 2.0, double 2.0>, %2 - ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo( + ; CHECK-DAG: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[G_PTR]]) ; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]] store <2 x double> %3, <2 x double>* %c ; CHECK-DAG: st.d [[R4]], 0($4) diff --git a/test/CodeGen/Mips/msa/basic_operations.ll b/test/CodeGen/Mips/msa/basic_operations.ll index 0169a07..2725e9a 100644 --- a/test/CodeGen/Mips/msa/basic_operations.ll +++ b/test/CodeGen/Mips/msa/basic_operations.ll @@ -18,10 +18,12 @@ define void @const_v16i8() nounwind { ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1 store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, <16 x i8>*@v16i8 - ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, <16 x i8>*@v16i8 - ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, <16 x i8>*@v16i8 ; MIPS32-BE: ldi.h [[R1:\$w[0-9]+]], 256 @@ -35,7 +37,8 @@ define void @const_v16i8() nounwind { ; MIPS32-AE-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]] store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8 - ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[G_PTR]]) ret void ; MIPS32-AE: .size const_v16i8 @@ -51,7 +54,8 @@ define void @const_v8i16() nounwind { ; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1 store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, <8 x i16>*@v8i16 - ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <8 x i16> <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, <8 x i16>*@v8i16 ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 4 @@ -64,7 +68,8 @@ define void @const_v8i16() nounwind { ; MIPS32-AE-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]] store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, <8 x i16>*@v8i16 - ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[G_PTR]]) ret void ; MIPS32-AE: .size const_v8i16 @@ -80,7 +85,8 @@ define void @const_v4i32() nounwind { ; MIPS32-AE: ldi.w [[R1:\$w[0-9]+]], 1 store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 31>, <4 x i32>*@v4i32 - ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>, <4 x i32>*@v4i32 ; MIPS32-AE: ldi.b 
[[R1:\$w[0-9]+]], 1 @@ -89,10 +95,12 @@ define void @const_v4i32() nounwind { ; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1 store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, <4 x i32>*@v4i32 - ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, <4 x i32>*@v4i32 - ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]]) ret void ; MIPS32-AE: .size const_v4i32 @@ -117,10 +125,12 @@ define void @const_v2i64() nounwind { ; MIPS32-AE: ldi.d [[R1:\$w[0-9]+]], 1 store volatile <2 x i64> <i64 1, i64 31>, <2 x i64>*@v2i64 - ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <2 x i64> <i64 3, i64 4>, <2 x i64>*@v2i64 - ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo( + ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]]) ret void ; MIPS32-AE: .size const_v2i64 diff --git a/test/CodeGen/Mips/msa/basic_operations_float.ll b/test/CodeGen/Mips/msa/basic_operations_float.ll index 1f53810..c8cef44 100644 --- a/test/CodeGen/Mips/msa/basic_operations_float.ll +++ b/test/CodeGen/Mips/msa/basic_operations_float.ll @@ -17,7 +17,8 @@ define void @const_v4f32() nounwind { ; MIPS32: fill.w [[R2:\$w[0-9]+]], [[R1]] store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, <4 x float>*@v4f32 - ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, <4 x float>*@v4f32 ; MIPS32: lui [[R1:\$[0-9]+]], 18304 @@ -25,10 +26,12 @@ define void @const_v4f32() nounwind { ; MIPS32: fill.w [[R3:\$w[0-9]+]], [[R2]] store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, <4 x float>*@v4f32 - ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, <4 x float>*@v4f32 - ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]]) ret void ; MIPS32: .size const_v4f32 @@ -41,22 +44,28 @@ define void @const_v2f64() nounwind { ; MIPS32: ldi.b [[R1:\$w[0-9]+]], 0 store volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64 - ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64 - ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64 - ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64 - ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <2 x double> <double 1.0, double 
31.0>, <2 x double>*@v2f64 - ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]]) store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64 - ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo( + ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($ + ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]]) ret void ; MIPS32: .size const_v2f64 @@ -128,6 +137,24 @@ define float @extract_v4f32_elt0() nounwind { ; MIPS32: .size extract_v4f32_elt0 } +define float @extract_v4f32_elt2() nounwind { + ; MIPS32: extract_v4f32_elt2: + + %1 = load <4 x float>* @v4f32 + ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], + + %2 = fadd <4 x float> %1, %1 + ; MIPS32-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]] + + %3 = extractelement <4 x float> %2, i32 2 + ; Element 2 can be obtained by splatting it across the vector and extracting + ; $w0:sub_lo + ; MIPS32-DAG: splati.w $w0, [[R1]][2] + + ret float %3 + ; MIPS32: .size extract_v4f32_elt2 +} + define double @extract_v2f64() nounwind { ; MIPS32: extract_v2f64: diff --git a/test/CodeGen/Mips/msa/bitwise.ll b/test/CodeGen/Mips/msa/bitwise.ll index 9a88c47..5d57198 100644 --- a/test/CodeGen/Mips/msa/bitwise.ll +++ b/test/CodeGen/Mips/msa/bitwise.ll @@ -990,9 +990,10 @@ define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %6 = and <16 x i8> %2, %4 %7 = or <16 x i8> %5, %6 ; bmnz is the same operation - ; CHECK-DAG: bmnz.v [[R1]], [[R2]], [[R3]] + ; (vselect Mask, IfSet, IfClr) -> (BMNZ IfClr, IfSet, Mask) + ; CHECK-DAG: bmnz.v [[R2]], [[R1]], [[R3]] store <16 x i8> %7, <16 x i8>* %c - ; CHECK-DAG: st.b [[R1]], 0($4) + ; CHECK-DAG: st.b [[R2]], 0($4) ret void ; CHECK: .size bsel_v16i8 diff --git a/test/CodeGen/Mips/msa/compare.ll b/test/CodeGen/Mips/msa/compare.ll index 6408d7b..87ca148 100644 --- a/test/CodeGen/Mips/msa/compare.ll +++ b/test/CodeGen/Mips/msa/compare.ll @@ -761,7 +761,8 @@ define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, %4 = icmp sgt <8 x i16> %1, %2 ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]] %5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3 - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]] store <8 x i16> %5, <8 x i16>* %d ; CHECK-DAG: st.h [[R4]], 0($4) @@ -782,7 +783,8 @@ define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, %4 = icmp sgt <4 x i32> %1, %2 ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] %5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3 - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]] store <4 x i32> %5, <4 x i32>* %d ; CHECK-DAG: st.w [[R4]], 0($4) @@ -803,7 +805,8 @@ define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, %4 = icmp sgt <2 x i64> %1, %2 ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] %5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3 - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]] store <2 x i64> %5, <2 x i64>* %d ; CHECK-DAG: st.d [[R4]], 0($4) @@ -846,7 +849,8 @@ define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, %4 = icmp ugt <8 x i16> %1, %2 ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]] %5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3 - ; CHECK-DAG: bsel.v [[R4]], 
[[R1]], [[R3]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]] store <8 x i16> %5, <8 x i16>* %d ; CHECK-DAG: st.h [[R4]], 0($4) @@ -867,7 +871,8 @@ define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, %4 = icmp ugt <4 x i32> %1, %2 ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] %5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3 - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]] store <4 x i32> %5, <4 x i32>* %d ; CHECK-DAG: st.w [[R4]], 0($4) @@ -888,7 +893,8 @@ define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, %4 = icmp ugt <2 x i64> %1, %2 ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] %5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3 - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]] store <2 x i64> %5, <2 x i64>* %d ; CHECK-DAG: st.d [[R4]], 0($4) @@ -906,7 +912,7 @@ define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <16 x i8> %1, %2 ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]] - %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + %4 = select <16 x i1> %3, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %1 ; CHECK-DAG: bseli.b [[R4]], [[R1]], 1 store <16 x i8> %4, <16 x i8>* %d ; CHECK-DAG: st.b [[R4]], 0($4) @@ -925,7 +931,7 @@ define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <8 x i16> %1, %2 ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]] - %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %4 = select <8 x i1> %3, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %1 ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1 ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] store <8 x i16> %4, <8 x i16>* %d @@ -945,7 +951,7 @@ define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <4 x i32> %1, %2 ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] - %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> + %4 = select <4 x i1> %3, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %1 ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1 ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] store <4 x i32> %4, <4 x i32>* %d @@ -965,7 +971,7 @@ define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <2 x i64> %1, %2 ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] - %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> <i64 1, i64 1> + %4 = select <2 x i1> %3, <2 x i64> <i64 1, i64 1>, <2 x i64> %1 ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1 ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] store <2 x i64> %4, <2 x i64>* %d @@ -985,7 +991,7 @@ define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <16 x i8> %1, %2 ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]] - %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, 
i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + %4 = select <16 x i1> %3, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %1 ; CHECK-DAG: bseli.b [[R4]], [[R1]], 1 store <16 x i8> %4, <16 x i8>* %d ; CHECK-DAG: st.b [[R4]], 0($4) @@ -1004,7 +1010,7 @@ define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <8 x i16> %1, %2 ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]] - %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %4 = select <8 x i1> %3, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %1 ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1 ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] store <8 x i16> %4, <8 x i16>* %d @@ -1024,7 +1030,7 @@ define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <4 x i32> %1, %2 ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] - %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1> + %4 = select <4 x i1> %3, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %1 ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1 ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] store <4 x i32> %4, <4 x i32>* %d @@ -1044,7 +1050,7 @@ define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <2 x i64> %1, %2 ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] - %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> <i64 1, i64 1> + %4 = select <2 x i1> %3, <2 x i64> <i64 1, i64 1>, <2 x i64> %1 ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1 ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] store <2 x i64> %4, <2 x i64>* %d diff --git a/test/CodeGen/Mips/msa/compare_float.ll b/test/CodeGen/Mips/msa/compare_float.ll index 2fc61f8..e93221b 100644 --- a/test/CodeGen/Mips/msa/compare_float.ll +++ b/test/CodeGen/Mips/msa/compare_float.ll @@ -32,12 +32,9 @@ define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) noun store <2 x i64> %4, <2 x i64>* %c ret void - ; FIXME: This code is correct, but poor. Ideally it would be similar to - ; the code in @false_v4f32 + ; (setcc $a, $b, SETFALSE) is always folded ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0 - ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63 - ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63 - ; CHECK-DAG: st.d [[R4]], 0($4) + ; CHECK-DAG: st.w [[R1]], 0($4) ; CHECK: .size false_v2f64 } @@ -509,12 +506,9 @@ define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounw store <2 x i64> %4, <2 x i64>* %c ret void - ; FIXME: This code is correct, but poor. Ideally it would be similar to - ; the code in @true_v4f32 - ; CHECK-DAG: ldi.d [[R1:\$w[0-9]+]], 1 - ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63 - ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63 - ; CHECK-DAG: st.d [[R4]], 0($4) + ; (setcc $a, $b, SETTRUE) is always folded. 
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], -1 + ; CHECK-DAG: st.w [[R1]], 0($4) ; CHECK: .size true_v2f64 } @@ -531,7 +525,8 @@ define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b, %4 = fcmp ogt <4 x float> %1, %2 ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] %5 = select <4 x i1> %4, <4 x float> %1, <4 x float> %3 - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]] store <4 x float> %5, <4 x float>* %d ; CHECK-DAG: st.w [[R4]], 0($4) @@ -552,7 +547,8 @@ define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b, %4 = fcmp ogt <2 x double> %1, %2 ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] %5 = select <2 x i1> %4, <2 x double> %1, <2 x double> %3 - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]] store <2 x double> %5, <2 x double>* %d ; CHECK-DAG: st.d [[R4]], 0($4) @@ -571,7 +567,8 @@ define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b, %3 = fcmp ogt <4 x float> %1, %2 ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] %4 = select <4 x i1> %3, <4 x float> %1, <4 x float> zeroinitializer - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3:\$w[0-9]+]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]] store <4 x float> %4, <4 x float>* %d ; CHECK-DAG: st.w [[R4]], 0($4) @@ -590,7 +587,8 @@ define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b, %3 = fcmp ogt <2 x double> %1, %2 ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] %4 = select <2 x i1> %3, <2 x double> %1, <2 x double> zeroinitializer - ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3:\$w[0-9]+]] + ; Note that IfSet and IfClr are swapped since the condition is inverted + ; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]] store <2 x double> %4, <2 x double>* %d ; CHECK-DAG: st.d [[R4]], 0($4) diff --git a/test/CodeGen/Mips/msa/elm_copy.ll b/test/CodeGen/Mips/msa/elm_copy.ll index ed3e52c..0dd75fa 100644 --- a/test/CodeGen/Mips/msa/elm_copy.ll +++ b/test/CodeGen/Mips/msa/elm_copy.ll @@ -1,8 +1,14 @@ ; Test the MSA intrinsics that are encoded with the ELM instruction format and ; are element extraction operations. 
-; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s -; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s +; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32 +; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32 +; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64 +; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64 @llvm_mips_copy_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 @llvm_mips_copy_s_b_RES = global i32 0, align 16 @@ -17,11 +23,15 @@ entry: declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind -; CHECK: llvm_mips_copy_s_b_test: -; CHECK: ld.b -; CHECK: copy_s.b -; CHECK: sw -; CHECK: .size llvm_mips_copy_s_b_test +; MIPS-ANY: llvm_mips_copy_s_b_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_s_b_ARG1) +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_s_b_ARG1) +; MIPS-ANY-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: copy_s.b [[RD:\$[0-9]+]], [[WS]][1] +; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_s_b_RES) +; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_s_b_RES) +; MIPS-ANY-DAG: sw [[RD]], 0([[RES]]) +; MIPS-ANY: .size llvm_mips_copy_s_b_test ; @llvm_mips_copy_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 @llvm_mips_copy_s_h_RES = global i32 0, align 16 @@ -36,11 +46,15 @@ entry: declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind -; CHECK: llvm_mips_copy_s_h_test: -; CHECK: ld.h -; CHECK: copy_s.h -; CHECK: sw -; CHECK: .size llvm_mips_copy_s_h_test +; MIPS-ANY: llvm_mips_copy_s_h_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_s_h_ARG1) +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_s_h_ARG1) +; MIPS-ANY-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: copy_s.h [[RD:\$[0-9]+]], [[WS]][1] +; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_s_h_RES) +; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_s_h_RES) +; MIPS-ANY-DAG: sw [[RD]], 0([[RES]]) +; MIPS-ANY: .size llvm_mips_copy_s_h_test ; @llvm_mips_copy_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 @llvm_mips_copy_s_w_RES = global i32 0, align 16 @@ -55,11 +69,15 @@ entry: declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind -; CHECK: llvm_mips_copy_s_w_test: -; CHECK: ld.w -; CHECK: copy_s.w -; CHECK: sw -; CHECK: .size llvm_mips_copy_s_w_test +; MIPS-ANY: llvm_mips_copy_s_w_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_s_w_ARG1) +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_s_w_ARG1) +; MIPS-ANY-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: copy_s.w [[RD:\$[0-9]+]], [[WS]][1] +; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_s_w_RES) +; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_s_w_RES) +; MIPS-ANY-DAG: sw [[RD]], 0([[RES]]) +; MIPS-ANY: .size llvm_mips_copy_s_w_test ; @llvm_mips_copy_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 @llvm_mips_copy_s_d_RES = global i64 0, align 16 @@ -74,13 +92,20 @@ entry: declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind -; CHECK: llvm_mips_copy_s_d_test: -; CHECK: ld.w -; CHECK: copy_s.w -; CHECK: copy_s.w -; CHECK: sw -; CHECK: sw -; CHECK: 
.size llvm_mips_copy_s_d_test +; MIPS-ANY: llvm_mips_copy_s_d_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_s_d_ARG1) +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_s_d_ARG1) +; MIPS32-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS64-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS32-DAG: copy_s.w [[RD1:\$[0-9]+]], [[WS]][2] +; MIPS32-DAG: copy_s.w [[RD2:\$[0-9]+]], [[WS]][3] +; MIPS64-DAG: copy_s.d [[RD:\$[0-9]+]], [[WS]][1] +; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_s_d_RES) +; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_s_d_RES) +; MIPS32-DAG: sw [[RD1]], 0([[RES]]) +; MIPS32-DAG: sw [[RD2]], 4([[RES]]) +; MIPS64-DAG: sd [[RD]], 0([[RES]]) +; MIPS-ANY: .size llvm_mips_copy_s_d_test ; @llvm_mips_copy_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 @llvm_mips_copy_u_b_RES = global i32 0, align 16 @@ -95,11 +120,15 @@ entry: declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind -; CHECK: llvm_mips_copy_u_b_test: -; CHECK: ld.b -; CHECK: copy_u.b -; CHECK: sw -; CHECK: .size llvm_mips_copy_u_b_test +; MIPS-ANY: llvm_mips_copy_u_b_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_u_b_ARG1) +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_u_b_ARG1) +; MIPS-ANY-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: copy_u.b [[RD:\$[0-9]+]], [[WS]][1] +; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_u_b_RES) +; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_u_b_RES) +; MIPS-ANY-DAG: sw [[RD]], 0([[RES]]) +; MIPS-ANY: .size llvm_mips_copy_u_b_test ; @llvm_mips_copy_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 @llvm_mips_copy_u_h_RES = global i32 0, align 16 @@ -114,11 +143,15 @@ entry: declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32) nounwind -; CHECK: llvm_mips_copy_u_h_test: -; CHECK: ld.h -; CHECK: copy_u.h -; CHECK: sw -; CHECK: .size llvm_mips_copy_u_h_test +; MIPS-ANY: llvm_mips_copy_u_h_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_u_h_ARG1) +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_u_h_ARG1) +; MIPS-ANY-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: copy_u.h [[RD:\$[0-9]+]], [[WS]][1] +; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_u_h_RES) +; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_u_h_RES) +; MIPS-ANY-DAG: sw [[RD]], 0([[RES]]) +; MIPS-ANY: .size llvm_mips_copy_u_h_test ; @llvm_mips_copy_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 @llvm_mips_copy_u_w_RES = global i32 0, align 16 @@ -133,11 +166,15 @@ entry: declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind -; CHECK: llvm_mips_copy_u_w_test: -; CHECK: ld.w -; CHECK: copy_u.w -; CHECK: sw -; CHECK: .size llvm_mips_copy_u_w_test +; MIPS-ANY: llvm_mips_copy_u_w_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_u_w_ARG1) +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_u_w_ARG1) +; MIPS-ANY-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: copy_u.w [[RD:\$[0-9]+]], [[WS]][1] +; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_u_w_RES) +; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_u_w_RES) +; MIPS-ANY-DAG: sw [[RD]], 0([[RES]]) +; MIPS-ANY: .size llvm_mips_copy_u_w_test ; @llvm_mips_copy_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 @llvm_mips_copy_u_d_RES = global i64 0, align 16 @@ -152,11 +189,18 @@ entry: declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32) 
nounwind -; CHECK: llvm_mips_copy_u_d_test: -; CHECK: ld.w -; CHECK: copy_s.w -; CHECK: copy_s.w -; CHECK: sw -; CHECK: sw -; CHECK: .size llvm_mips_copy_u_d_test +; MIPS-ANY: llvm_mips_copy_u_d_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_u_d_ARG1) +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_u_d_ARG1) +; MIPS32-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS64-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]]) +; MIPS32-DAG: copy_s.w [[RD1:\$[0-9]+]], [[WS]][2] +; MIPS32-DAG: copy_s.w [[RD2:\$[0-9]+]], [[WS]][3] +; MIPS64-DAG: copy_u.d [[RD:\$[0-9]+]], [[WS]][1] +; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_u_d_RES) +; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_u_d_RES) +; MIPS32-DAG: sw [[RD1]], 0([[RES]]) +; MIPS32-DAG: sw [[RD2]], 4([[RES]]) +; MIPS64-DAG: sd [[RD]], 0([[RES]]) +; MIPS-ANY: .size llvm_mips_copy_u_d_test ; diff --git a/test/CodeGen/Mips/msa/elm_insv.ll b/test/CodeGen/Mips/msa/elm_insv.ll index fa7ceaf..c746e52 100644 --- a/test/CodeGen/Mips/msa/elm_insv.ll +++ b/test/CodeGen/Mips/msa/elm_insv.ll @@ -1,8 +1,14 @@ ; Test the MSA element insertion intrinsics that are encoded with the ELM ; instruction format. -; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s -; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s +; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32 +; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32 +; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64 +; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64 @llvm_mips_insert_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 @llvm_mips_insert_b_ARG3 = global i32 27, align 16 @@ -19,12 +25,12 @@ entry: declare <16 x i8> @llvm.mips.insert.b(<16 x i8>, i32, i32) nounwind -; CHECK: llvm_mips_insert_b_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], 0( -; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0( -; CHECK-DAG: insert.b [[R2]][1], [[R1]] -; CHECK-DAG: st.b [[R2]], 0( -; CHECK: .size llvm_mips_insert_b_test +; MIPS-ANY: llvm_mips_insert_b_test: +; MIPS-ANY-DAG: lw [[R1:\$[0-9]+]], 0( +; MIPS-ANY-DAG: ld.b [[R2:\$w[0-9]+]], 0( +; MIPS-ANY-DAG: insert.b [[R2]][1], [[R1]] +; MIPS-ANY-DAG: st.b [[R2]], 0( +; MIPS-ANY: .size llvm_mips_insert_b_test ; @llvm_mips_insert_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 @llvm_mips_insert_h_ARG3 = global i32 27, align 16 @@ -41,12 +47,12 @@ entry: declare <8 x i16> @llvm.mips.insert.h(<8 x i16>, i32, i32) nounwind -; CHECK: llvm_mips_insert_h_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], 0( -; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0( -; CHECK-DAG: insert.h [[R2]][1], [[R1]] -; CHECK-DAG: st.h [[R2]], 0( -; CHECK: .size llvm_mips_insert_h_test +; MIPS-ANY: llvm_mips_insert_h_test: +; MIPS-ANY-DAG: lw [[R1:\$[0-9]+]], 0( +; MIPS-ANY-DAG: ld.h [[R2:\$w[0-9]+]], 0( +; MIPS-ANY-DAG: insert.h [[R2]][1], [[R1]] +; MIPS-ANY-DAG: st.h [[R2]], 0( +; MIPS-ANY: .size llvm_mips_insert_h_test ; @llvm_mips_insert_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 @llvm_mips_insert_w_ARG3 = global i32 27, align 16 @@ -63,12 +69,12 @@ entry: declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind -; CHECK: 
llvm_mips_insert_w_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], 0( -; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0( -; CHECK-DAG: insert.w [[R2]][1], [[R1]] -; CHECK-DAG: st.w [[R2]], 0( -; CHECK: .size llvm_mips_insert_w_test +; MIPS-ANY: llvm_mips_insert_w_test: +; MIPS-ANY-DAG: lw [[R1:\$[0-9]+]], 0( +; MIPS-ANY-DAG: ld.w [[R2:\$w[0-9]+]], 0( +; MIPS-ANY-DAG: insert.w [[R2]][1], [[R1]] +; MIPS-ANY-DAG: st.w [[R2]], 0( +; MIPS-ANY: .size llvm_mips_insert_w_test ; @llvm_mips_insert_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 @llvm_mips_insert_d_ARG3 = global i64 27, align 16 @@ -85,14 +91,18 @@ entry: declare <2 x i64> @llvm.mips.insert.d(<2 x i64>, i32, i64) nounwind -; CHECK: llvm_mips_insert_d_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], 0( -; CHECK-DAG: lw [[R2:\$[0-9]+]], 4( -; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], -; CHECK-DAG: insert.w [[R3]][2], [[R1]] -; CHECK-DAG: insert.w [[R3]][3], [[R2]] -; CHECK-DAG: st.w [[R3]], -; CHECK: .size llvm_mips_insert_d_test +; MIPS-ANY: llvm_mips_insert_d_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], 0( +; MIPS32-DAG: lw [[R2:\$[0-9]+]], 4( +; MIPS64-DAG: ld [[R1:\$[0-9]+]], 0( +; MIPS32-DAG: ld.w [[R3:\$w[0-9]+]], +; MIPS64-DAG: ld.d [[W1:\$w[0-9]+]], +; MIPS32-DAG: insert.w [[R3]][2], [[R1]] +; MIPS32-DAG: insert.w [[R3]][3], [[R2]] +; MIPS64-DAG: insert.d [[W1]][1], [[R1]] +; MIPS32-DAG: st.w [[R3]], +; MIPS64-DAG: st.d [[W1]], +; MIPS-ANY: .size llvm_mips_insert_d_test ; @llvm_mips_insve_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 @llvm_mips_insve_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 @@ -109,14 +119,16 @@ entry: declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind -; CHECK: llvm_mips_insve_b_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_b_ARG1)( -; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_b_ARG3)( -; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]]) -; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]]) -; CHECK-DAG: insve.b [[R3]][1], [[R4]][0] -; CHECK-DAG: st.b [[R3]], -; CHECK: .size llvm_mips_insve_b_test +; MIPS-ANY: llvm_mips_insve_b_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_b_ARG1)( +; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_b_ARG3)( +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_insve_b_ARG1)( +; MIPS64-DAG: ld [[R2:\$[0-9]+]], %got_disp(llvm_mips_insve_b_ARG3)( +; MIPS-ANY-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]]) +; MIPS-ANY-DAG: insve.b [[R3]][1], [[R4]][0] +; MIPS-ANY-DAG: st.b [[R3]], +; MIPS-ANY: .size llvm_mips_insve_b_test ; @llvm_mips_insve_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 @llvm_mips_insve_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16 @@ -133,14 +145,16 @@ entry: declare <8 x i16> @llvm.mips.insve.h(<8 x i16>, i32, <8 x i16>) nounwind -; CHECK: llvm_mips_insve_h_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_h_ARG1)( -; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_h_ARG3)( -; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]]) -; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]]) -; CHECK-DAG: insve.h [[R3]][1], [[R4]][0] -; CHECK-DAG: st.h [[R3]], -; CHECK: .size llvm_mips_insve_h_test +; MIPS-ANY: llvm_mips_insve_h_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_h_ARG1)( +; MIPS32-DAG: lw 
[[R2:\$[0-9]+]], %got(llvm_mips_insve_h_ARG3)( +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_insve_h_ARG1)( +; MIPS64-DAG: ld [[R2:\$[0-9]+]], %got_disp(llvm_mips_insve_h_ARG3)( +; MIPS-ANY-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]]) +; MIPS-ANY-DAG: insve.h [[R3]][1], [[R4]][0] +; MIPS-ANY-DAG: st.h [[R3]], +; MIPS-ANY: .size llvm_mips_insve_h_test ; @llvm_mips_insve_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 @llvm_mips_insve_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16 @@ -157,14 +171,16 @@ entry: declare <4 x i32> @llvm.mips.insve.w(<4 x i32>, i32, <4 x i32>) nounwind -; CHECK: llvm_mips_insve_w_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_w_ARG1)( -; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_w_ARG3)( -; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]]) -; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]]) -; CHECK-DAG: insve.w [[R3]][1], [[R4]][0] -; CHECK-DAG: st.w [[R3]], -; CHECK: .size llvm_mips_insve_w_test +; MIPS-ANY: llvm_mips_insve_w_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_w_ARG1)( +; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_w_ARG3)( +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_insve_w_ARG1)( +; MIPS64-DAG: ld [[R2:\$[0-9]+]], %got_disp(llvm_mips_insve_w_ARG3)( +; MIPS-ANY-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]]) +; MIPS-ANY-DAG: insve.w [[R3]][1], [[R4]][0] +; MIPS-ANY-DAG: st.w [[R3]], +; MIPS-ANY: .size llvm_mips_insve_w_test ; @llvm_mips_insve_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 @llvm_mips_insve_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16 @@ -181,12 +197,14 @@ entry: declare <2 x i64> @llvm.mips.insve.d(<2 x i64>, i32, <2 x i64>) nounwind -; CHECK: llvm_mips_insve_d_test: -; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_d_ARG1)( -; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_d_ARG3)( -; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]]) -; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]]) -; CHECK-DAG: insve.d [[R3]][1], [[R4]][0] -; CHECK-DAG: st.d [[R3]], -; CHECK: .size llvm_mips_insve_d_test +; MIPS-ANY: llvm_mips_insve_d_test: +; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_d_ARG1)( +; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_d_ARG3)( +; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_insve_d_ARG1)( +; MIPS64-DAG: ld [[R2:\$[0-9]+]], %got_disp(llvm_mips_insve_d_ARG3)( +; MIPS-ANY-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]]) +; MIPS-ANY-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]]) +; MIPS-ANY-DAG: insve.d [[R3]][1], [[R4]][0] +; MIPS-ANY-DAG: st.d [[R3]], +; MIPS-ANY: .size llvm_mips_insve_d_test ; diff --git a/test/CodeGen/Mips/msa/elm_shift_slide.ll b/test/CodeGen/Mips/msa/elm_shift_slide.ll index 39d670d..00a6544 100644 --- a/test/CodeGen/Mips/msa/elm_shift_slide.ll +++ b/test/CodeGen/Mips/msa/elm_shift_slide.ll @@ -5,17 +5,19 @@ ; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s @llvm_mips_sldi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 +@llvm_mips_sldi_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 @llvm_mips_sldi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16 define void @llvm_mips_sldi_b_test() nounwind { entry: %0 = load <16 x i8>* @llvm_mips_sldi_b_ARG1 
- %1 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, i32 1) - store <16 x i8> %1, <16 x i8>* @llvm_mips_sldi_b_RES + %1 = load <16 x i8>* @llvm_mips_sldi_b_ARG2 + %2 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, <16 x i8> %1, i32 1) + store <16 x i8> %2, <16 x i8>* @llvm_mips_sldi_b_RES ret void } -declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, i32) nounwind +declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32) nounwind ; CHECK: llvm_mips_sldi_b_test: ; CHECK: ld.b @@ -24,17 +26,19 @@ declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, i32) nounwind ; CHECK: .size llvm_mips_sldi_b_test ; @llvm_mips_sldi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 +@llvm_mips_sldi_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 @llvm_mips_sldi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16 define void @llvm_mips_sldi_h_test() nounwind { entry: %0 = load <8 x i16>* @llvm_mips_sldi_h_ARG1 - %1 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, i32 1) - store <8 x i16> %1, <8 x i16>* @llvm_mips_sldi_h_RES + %1 = load <8 x i16>* @llvm_mips_sldi_h_ARG2 + %2 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, <8 x i16> %1, i32 1) + store <8 x i16> %2, <8 x i16>* @llvm_mips_sldi_h_RES ret void } -declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, i32) nounwind +declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32) nounwind ; CHECK: llvm_mips_sldi_h_test: ; CHECK: ld.h @@ -43,17 +47,19 @@ declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, i32) nounwind ; CHECK: .size llvm_mips_sldi_h_test ; @llvm_mips_sldi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 +@llvm_mips_sldi_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 @llvm_mips_sldi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16 define void @llvm_mips_sldi_w_test() nounwind { entry: %0 = load <4 x i32>* @llvm_mips_sldi_w_ARG1 - %1 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, i32 1) - store <4 x i32> %1, <4 x i32>* @llvm_mips_sldi_w_RES + %1 = load <4 x i32>* @llvm_mips_sldi_w_ARG2 + %2 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, <4 x i32> %1, i32 1) + store <4 x i32> %2, <4 x i32>* @llvm_mips_sldi_w_RES ret void } -declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, i32) nounwind +declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32) nounwind ; CHECK: llvm_mips_sldi_w_test: ; CHECK: ld.w @@ -62,17 +68,19 @@ declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, i32) nounwind ; CHECK: .size llvm_mips_sldi_w_test ; @llvm_mips_sldi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 +@llvm_mips_sldi_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16 @llvm_mips_sldi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16 define void @llvm_mips_sldi_d_test() nounwind { entry: %0 = load <2 x i64>* @llvm_mips_sldi_d_ARG1 - %1 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, i32 1) - store <2 x i64> %1, <2 x i64>* @llvm_mips_sldi_d_RES + %1 = load <2 x i64>* @llvm_mips_sldi_d_ARG2 + %2 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, <2 x i64> %1, i32 1) + store <2 x i64> %2, <2 x i64>* @llvm_mips_sldi_d_RES ret void } -declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, i32) nounwind +declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32) nounwind ; CHECK: llvm_mips_sldi_d_test: ; CHECK: ld.d diff --git a/test/CodeGen/Mips/msa/frameindex.ll b/test/CodeGen/Mips/msa/frameindex.ll index 3088e1b..07e67bf 100644 --- 
a/test/CodeGen/Mips/msa/frameindex.ll +++ b/test/CodeGen/Mips/msa/frameindex.ll @@ -83,3 +83,312 @@ define void @loadstore_v16i8_just_over_simm16() nounwind { ret void ; MIPS32-AE: .size loadstore_v16i8_just_over_simm16 } + +define void @loadstore_v8i16_near() nounwind { + ; MIPS32-AE: loadstore_v8i16_near: + + %1 = alloca <8 x i16> + %2 = load volatile <8 x i16>* %1 + ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0($sp) + store volatile <8 x i16> %2, <8 x i16>* %1 + ; MIPS32-AE: st.h [[R1]], 0($sp) + + ret void + ; MIPS32-AE: .size loadstore_v8i16_near +} + +define void @loadstore_v8i16_unaligned() nounwind { + ; MIPS32-AE: loadstore_v8i16_unaligned: + + %1 = alloca [2 x <8 x i16>] + %2 = bitcast [2 x <8 x i16>]* %1 to i8* + %3 = getelementptr i8* %2, i32 1 + %4 = bitcast i8* %3 to [2 x <8 x i16>]* + %5 = getelementptr [2 x <8 x i16>]* %4, i32 0, i32 0 + + %6 = load volatile <8 x i16>* %5 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1 + ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <8 x i16> %6, <8 x i16>* %5 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1 + ; MIPS32-AE: st.h [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v8i16_unaligned +} + +define void @loadstore_v8i16_just_under_simm10() nounwind { + ; MIPS32-AE: loadstore_v8i16_just_under_simm10: + + %1 = alloca <8 x i16> + %2 = alloca [1008 x i8] ; Push the frame right up to 1024 bytes + + %3 = load volatile <8 x i16>* %1 + ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 1008($sp) + store volatile <8 x i16> %3, <8 x i16>* %1 + ; MIPS32-AE: st.h [[R1]], 1008($sp) + + ret void + ; MIPS32-AE: .size loadstore_v8i16_just_under_simm10 +} + +define void @loadstore_v8i16_just_over_simm10() nounwind { + ; MIPS32-AE: loadstore_v8i16_just_over_simm10: + + %1 = alloca <8 x i16> + %2 = alloca [1009 x i8] ; Push the frame just over 1024 bytes + + %3 = load volatile <8 x i16>* %1 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1024 + ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <8 x i16> %3, <8 x i16>* %1 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1024 + ; MIPS32-AE: st.h [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v8i16_just_over_simm10 +} + +define void @loadstore_v8i16_just_under_simm16() nounwind { + ; MIPS32-AE: loadstore_v8i16_just_under_simm16: + + %1 = alloca <8 x i16> + %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes + + %3 = load volatile <8 x i16>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <8 x i16> %3, <8 x i16>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: st.h [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v8i16_just_under_simm16 +} + +define void @loadstore_v8i16_just_over_simm16() nounwind { + ; MIPS32-AE: loadstore_v8i16_just_over_simm16: + + %1 = alloca <8 x i16> + %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes + + %3 = load volatile <8 x i16>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <8 x i16> %3, <8 x i16>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: st.h [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v8i16_just_over_simm16 +} + +define void @loadstore_v4i32_near() nounwind { + ; MIPS32-AE: loadstore_v4i32_near: 
+ + %1 = alloca <4 x i32> + %2 = load volatile <4 x i32>* %1 + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0($sp) + store volatile <4 x i32> %2, <4 x i32>* %1 + ; MIPS32-AE: st.w [[R1]], 0($sp) + + ret void + ; MIPS32-AE: .size loadstore_v4i32_near +} + +define void @loadstore_v4i32_unaligned() nounwind { + ; MIPS32-AE: loadstore_v4i32_unaligned: + + %1 = alloca [2 x <4 x i32>] + %2 = bitcast [2 x <4 x i32>]* %1 to i8* + %3 = getelementptr i8* %2, i32 1 + %4 = bitcast i8* %3 to [2 x <4 x i32>]* + %5 = getelementptr [2 x <4 x i32>]* %4, i32 0, i32 0 + + %6 = load volatile <4 x i32>* %5 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1 + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <4 x i32> %6, <4 x i32>* %5 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1 + ; MIPS32-AE: st.w [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v4i32_unaligned +} + +define void @loadstore_v4i32_just_under_simm10() nounwind { + ; MIPS32-AE: loadstore_v4i32_just_under_simm10: + + %1 = alloca <4 x i32> + %2 = alloca [2032 x i8] ; Push the frame right up to 2048 bytes + + %3 = load volatile <4 x i32>* %1 + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 2032($sp) + store volatile <4 x i32> %3, <4 x i32>* %1 + ; MIPS32-AE: st.w [[R1]], 2032($sp) + + ret void + ; MIPS32-AE: .size loadstore_v4i32_just_under_simm10 +} + +define void @loadstore_v4i32_just_over_simm10() nounwind { + ; MIPS32-AE: loadstore_v4i32_just_over_simm10: + + %1 = alloca <4 x i32> + %2 = alloca [2033 x i8] ; Push the frame just over 2048 bytes + + %3 = load volatile <4 x i32>* %1 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 2048 + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <4 x i32> %3, <4 x i32>* %1 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 2048 + ; MIPS32-AE: st.w [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v4i32_just_over_simm10 +} + +define void @loadstore_v4i32_just_under_simm16() nounwind { + ; MIPS32-AE: loadstore_v4i32_just_under_simm16: + + %1 = alloca <4 x i32> + %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes + + %3 = load volatile <4 x i32>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <4 x i32> %3, <4 x i32>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: st.w [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v4i32_just_under_simm16 +} + +define void @loadstore_v4i32_just_over_simm16() nounwind { + ; MIPS32-AE: loadstore_v4i32_just_over_simm16: + + %1 = alloca <4 x i32> + %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes + + %3 = load volatile <4 x i32>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <4 x i32> %3, <4 x i32>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: st.w [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v4i32_just_over_simm16 +} + +define void @loadstore_v2i64_near() nounwind { + ; MIPS32-AE: loadstore_v2i64_near: + + %1 = alloca <2 x i64> + %2 = load volatile <2 x i64>* %1 + ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0($sp) + store volatile <2 x i64> %2, <2 x i64>* %1 + ; MIPS32-AE: st.d [[R1]], 0($sp) + + ret void + ; MIPS32-AE: .size loadstore_v2i64_near +} + +define void @loadstore_v2i64_unaligned() nounwind { + ; 
MIPS32-AE: loadstore_v2i64_unaligned: + + %1 = alloca [2 x <2 x i64>] + %2 = bitcast [2 x <2 x i64>]* %1 to i8* + %3 = getelementptr i8* %2, i32 1 + %4 = bitcast i8* %3 to [2 x <2 x i64>]* + %5 = getelementptr [2 x <2 x i64>]* %4, i32 0, i32 0 + + %6 = load volatile <2 x i64>* %5 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1 + ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <2 x i64> %6, <2 x i64>* %5 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1 + ; MIPS32-AE: st.d [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v2i64_unaligned +} + +define void @loadstore_v2i64_just_under_simm10() nounwind { + ; MIPS32-AE: loadstore_v2i64_just_under_simm10: + + %1 = alloca <2 x i64> + %2 = alloca [4080 x i8] ; Push the frame right up to 4096 bytes + + %3 = load volatile <2 x i64>* %1 + ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 4080($sp) + store volatile <2 x i64> %3, <2 x i64>* %1 + ; MIPS32-AE: st.d [[R1]], 4080($sp) + + ret void + ; MIPS32-AE: .size loadstore_v2i64_just_under_simm10 +} + +define void @loadstore_v2i64_just_over_simm10() nounwind { + ; MIPS32-AE: loadstore_v2i64_just_over_simm10: + + %1 = alloca <2 x i64> + %2 = alloca [4081 x i8] ; Push the frame just over 4096 bytes + + %3 = load volatile <2 x i64>* %1 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 4096 + ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <2 x i64> %3, <2 x i64>* %1 + ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 4096 + ; MIPS32-AE: st.d [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v2i64_just_over_simm10 +} + +define void @loadstore_v2i64_just_under_simm16() nounwind { + ; MIPS32-AE: loadstore_v2i64_just_under_simm16: + + %1 = alloca <2 x i64> + %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes + + %3 = load volatile <2 x i64>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <2 x i64> %3, <2 x i64>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: st.d [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v2i64_just_under_simm16 +} + +define void @loadstore_v2i64_just_over_simm16() nounwind { + ; MIPS32-AE: loadstore_v2i64_just_over_simm16: + + %1 = alloca <2 x i64> + %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes + + %3 = load volatile <2 x i64>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) + store volatile <2 x i64> %3, <2 x i64>* %1 + ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768 + ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]] + ; MIPS32-AE: st.d [[R1]], 0([[BASE]]) + + ret void + ; MIPS32-AE: .size loadstore_v2i64_just_over_simm16 +} diff --git a/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll index 24e27cb..f25ab22 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll @@ -10,7 +10,7 @@ ; The legalizer legalized ; the <4 x i8>'s into <4 x i32>'s, then a call to ; isVSplat() returned the splat value for <i8 -1, i8 -1, ...> as a 32-bit APInt ; (255), but the zeroinitializer splat value as an 8-bit APInt (0). 
The -; assertion occured when trying to check the values were bitwise inverses of +; assertion occurred when trying to check the values were bitwise inverses of ; each-other. ; ; It should at least successfully build. diff --git a/test/CodeGen/Mips/msa/shift-dagcombine.ll b/test/CodeGen/Mips/msa/shift-dagcombine.ll index 0d809fb..322acff 100644 --- a/test/CodeGen/Mips/msa/shift-dagcombine.ll +++ b/test/CodeGen/Mips/msa/shift-dagcombine.ll @@ -37,7 +37,8 @@ define void @lshr_v4i32(<4 x i32>* %c) nounwind { %2 = lshr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>, <i32 0, i32 1, i32 2, i32 3> ; CHECK-NOT: srl - ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], %lo + ; CHECK-DAG: addiu [[CPOOL:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0([[CPOOL]]) ; CHECK-NOT: srl store volatile <4 x i32> %2, <4 x i32>* %c ; CHECK-DAG: st.w [[R1]], 0($4) diff --git a/test/CodeGen/Mips/msa/shuffle.ll b/test/CodeGen/Mips/msa/shuffle.ll index 316c669..faeec5d 100644 --- a/test/CodeGen/Mips/msa/shuffle.ll +++ b/test/CodeGen/Mips/msa/shuffle.ll @@ -7,7 +7,8 @@ define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind %1 = load <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> - ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]]) ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]] store <16 x i8> %2, <16 x i8>* %c ; CHECK-DAG: st.b [[R3]], 0($4) @@ -37,7 +38,8 @@ define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind %2 = load <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16> - ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]]) ; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R2]] store <16 x i8> %3, <16 x i8>* %c ; CHECK-DAG: st.b [[R3]], 0($4) @@ -54,8 +56,11 @@ define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind %2 = load <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3> - ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo - ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R2]] + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]]) + ; The concatenation step of vshf is bitwise not vectorwise so we must reverse + ; the operands to get the right answer. 
+ ; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R1]] store <16 x i8> %3, <16 x i8>* %c ; CHECK-DAG: st.b [[R3]], 0($4) @@ -83,7 +88,8 @@ define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind %1 = load <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> - ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]]) ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]] store <8 x i16> %2, <8 x i16>* %c ; CHECK-DAG: st.h [[R3]], 0($4) @@ -113,7 +119,8 @@ define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind %2 = load <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8> - ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]]) ; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R2]] store <8 x i16> %3, <8 x i16>* %c ; CHECK-DAG: st.h [[R3]], 0($4) @@ -130,8 +137,11 @@ define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind %2 = load <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3> - ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo - ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R2]] + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]]) + ; The concatenation step of vshf is bitwise not vectorwise so we must reverse + ; the operands to get the right answer. + ; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R1]] store <8 x i16> %3, <8 x i16>* %c ; CHECK-DAG: st.h [[R3]], 0($4) @@ -207,8 +217,11 @@ define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind %2 = load <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4> - ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo - ; CHECK-DAG: vshf.w [[R3]], [[R1]], [[R2]] + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[PTR_A]]) + ; The concatenation step of vshf is bitwise not vectorwise so we must reverse + ; the operands to get the right answer. 
+ ; CHECK-DAG: vshf.w [[R3]], [[R2]], [[R1]] store <4 x i32> %3, <4 x i32>* %c ; CHECK-DAG: st.w [[R3]], 0($4) @@ -236,7 +249,8 @@ define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind %1 = load <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0> - ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]]) ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]] store <2 x i64> %2, <2 x i64>* %c ; CHECK-DAG: st.d [[R3]], 0($4) @@ -266,7 +280,8 @@ define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind %2 = load <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2> - ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]]) ; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R2]] store <2 x i64> %3, <2 x i64>* %c ; CHECK-DAG: st.d [[R3]], 0($4) @@ -283,8 +298,11 @@ define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind %2 = load <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2> - ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo - ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R2]] + ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ + ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]]) + ; The concatenation step of vshf is bitwise not vectorwise so we must reverse + ; the operands to get the right answer. + ; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R1]] store <2 x i64> %3, <2 x i64>* %c ; CHECK-DAG: st.d [[R3]], 0($4) diff --git a/test/CodeGen/Mips/msa/special.ll b/test/CodeGen/Mips/msa/special.ll index 60a4369..f65a14f 100644 --- a/test/CodeGen/Mips/msa/special.ll +++ b/test/CodeGen/Mips/msa/special.ll @@ -1,6 +1,9 @@ ; Test the MSA intrinsics that are encoded with the SPECIAL instruction format. 
-; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s +; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s --check-prefix=MIPS32 +; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \ +; RUN: FileCheck %s --check-prefix=MIPS64 define i32 @llvm_mips_lsa_test(i32 %a, i32 %b) nounwind { entry: @@ -10,9 +13,9 @@ entry: declare i32 @llvm.mips.lsa(i32, i32, i32) nounwind -; CHECK: llvm_mips_lsa_test: -; CHECK: lsa {{\$[0-9]+}}, {{\$[0-9]+}}, {{\$[0-9]+}}, 2 -; CHECK: .size llvm_mips_lsa_test +; MIPS32: llvm_mips_lsa_test: +; MIPS32: lsa {{\$[0-9]+}}, $5, $4, 2 +; MIPS32: .size llvm_mips_lsa_test define i32 @lsa_test(i32 %a, i32 %b) nounwind { entry: @@ -21,6 +24,29 @@ entry: ret i32 %1 } -; CHECK: lsa_test: -; CHECK: lsa {{\$[0-9]+}}, {{\$[0-9]+}}, {{\$[0-9]+}}, 2 -; CHECK: .size lsa_test +; MIPS32: lsa_test: +; MIPS32: lsa {{\$[0-9]+}}, $5, $4, 2 +; MIPS32: .size lsa_test + +define i64 @llvm_mips_dlsa_test(i64 %a, i64 %b) nounwind { +entry: + %0 = tail call i64 @llvm.mips.dlsa(i64 %a, i64 %b, i32 2) + ret i64 %0 +} + +declare i64 @llvm.mips.dlsa(i64, i64, i32) nounwind + +; MIPS64: llvm_mips_dlsa_test: +; MIPS64: dlsa {{\$[0-9]+}}, $5, $4, 2 +; MIPS64: .size llvm_mips_dlsa_test + +define i64 @dlsa_test(i64 %a, i64 %b) nounwind { +entry: + %0 = shl i64 %b, 2 + %1 = add i64 %a, %0 + ret i64 %1 +} + +; MIPS64: dlsa_test: +; MIPS64: dlsa {{\$[0-9]+}}, $5, $4, 2 +; MIPS64: .size dlsa_test diff --git a/test/CodeGen/Mips/msa/vec.ll b/test/CodeGen/Mips/msa/vec.ll index 5bddf5a..d5b97f5 100644 --- a/test/CodeGen/Mips/msa/vec.ll +++ b/test/CodeGen/Mips/msa/vec.ll @@ -104,12 +104,12 @@ entry: ret void } -; CHECK: and_v_b_test: -; CHECK: ld.b -; CHECK: ld.b -; CHECK: and.v -; CHECK: st.b -; CHECK: .size and_v_b_test +; ANYENDIAN: and_v_b_test: +; ANYENDIAN: ld.b +; ANYENDIAN: ld.b +; ANYENDIAN: and.v +; ANYENDIAN: st.b +; ANYENDIAN: .size and_v_b_test ; define void @and_v_h_test() nounwind { entry: @@ -120,12 +120,12 @@ entry: ret void } -; CHECK: and_v_h_test: -; CHECK: ld.h -; CHECK: ld.h -; CHECK: and.v -; CHECK: st.h -; CHECK: .size and_v_h_test +; ANYENDIAN: and_v_h_test: +; ANYENDIAN: ld.h +; ANYENDIAN: ld.h +; ANYENDIAN: and.v +; ANYENDIAN: st.h +; ANYENDIAN: .size and_v_h_test ; define void @and_v_w_test() nounwind { @@ -137,12 +137,12 @@ entry: ret void } -; CHECK: and_v_w_test: -; CHECK: ld.w -; CHECK: ld.w -; CHECK: and.v -; CHECK: st.w -; CHECK: .size and_v_w_test +; ANYENDIAN: and_v_w_test: +; ANYENDIAN: ld.w +; ANYENDIAN: ld.w +; ANYENDIAN: and.v +; ANYENDIAN: st.w +; ANYENDIAN: .size and_v_w_test ; define void @and_v_d_test() nounwind { @@ -154,12 +154,12 @@ entry: ret void } -; CHECK: and_v_d_test: -; CHECK: ld.d -; CHECK: ld.d -; CHECK: and.v -; CHECK: st.d -; CHECK: .size and_v_d_test +; ANYENDIAN: and_v_d_test: +; ANYENDIAN: ld.d +; ANYENDIAN: ld.d +; ANYENDIAN: and.v +; ANYENDIAN: st.d +; ANYENDIAN: .size and_v_d_test ; @llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 @llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 @@ -431,9 +431,9 @@ entry: ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]]) ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]]) ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]]) -; bmnz.v is the same as bsel.v with wt and wd_in swapped -; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]] -; ANYENDIAN-DAG: st.b [[R6]], 
0( +; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in) +; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]] +; ANYENDIAN-DAG: st.b [[R5]], 0( ; ANYENDIAN: .size llvm_mips_bsel_v_b_test @llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16 @@ -462,9 +462,9 @@ entry: ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]]) ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]]) ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]]) -; bmnz.v is the same as bsel.v with wt and wd_in swapped -; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]] -; ANYENDIAN-DAG: st.b [[R6]], 0( +; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in) +; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]] +; ANYENDIAN-DAG: st.b [[R5]], 0( ; ANYENDIAN: .size llvm_mips_bsel_v_h_test @llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16 @@ -493,9 +493,9 @@ entry: ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]]) ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]]) ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]]) -; bmnz.v is the same as bsel.v with wt and wd_in swapped -; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]] -; ANYENDIAN-DAG: st.b [[R6]], 0( +; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in) +; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]] +; ANYENDIAN-DAG: st.b [[R5]], 0( ; ANYENDIAN: .size llvm_mips_bsel_v_w_test @llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16 @@ -524,9 +524,9 @@ entry: ; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]]) ; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]]) ; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]]) -; bmnz.v is the same as bsel.v with wt and wd_in swapped -; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]] -; ANYENDIAN-DAG: st.b [[R6]], 0( +; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in) +; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]] +; ANYENDIAN-DAG: st.b [[R5]], 0( ; ANYENDIAN: .size llvm_mips_bsel_v_d_test @llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16 @@ -722,12 +722,12 @@ entry: ret void } -; CHECK: or_v_b_test: -; CHECK: ld.b -; CHECK: ld.b -; CHECK: or.v -; CHECK: st.b -; CHECK: .size or_v_b_test +; ANYENDIAN: or_v_b_test: +; ANYENDIAN: ld.b +; ANYENDIAN: ld.b +; ANYENDIAN: or.v +; ANYENDIAN: st.b +; ANYENDIAN: .size or_v_b_test ; define void @or_v_h_test() nounwind { entry: @@ -738,12 +738,12 @@ entry: ret void } -; CHECK: or_v_h_test: -; CHECK: ld.h -; CHECK: ld.h -; CHECK: or.v -; CHECK: st.h -; CHECK: .size or_v_h_test +; ANYENDIAN: or_v_h_test: +; ANYENDIAN: ld.h +; ANYENDIAN: ld.h +; ANYENDIAN: or.v +; ANYENDIAN: st.h +; ANYENDIAN: .size or_v_h_test ; define void @or_v_w_test() nounwind { @@ -755,12 +755,12 @@ entry: ret void } -; CHECK: or_v_w_test: -; CHECK: ld.w -; CHECK: ld.w -; CHECK: or.v -; CHECK: st.w -; CHECK: .size or_v_w_test +; ANYENDIAN: or_v_w_test: +; ANYENDIAN: ld.w +; ANYENDIAN: ld.w +; ANYENDIAN: or.v +; ANYENDIAN: st.w +; ANYENDIAN: .size or_v_w_test ; define void @or_v_d_test() nounwind { @@ -772,12 +772,12 @@ entry: ret void } -; CHECK: or_v_d_test: -; CHECK: ld.d -; CHECK: ld.d -; CHECK: or.v -; CHECK: st.d -; CHECK: .size or_v_d_test +; ANYENDIAN: or_v_d_test: +; ANYENDIAN: ld.d +; ANYENDIAN: ld.d +; ANYENDIAN: or.v +; ANYENDIAN: st.d +; ANYENDIAN: .size or_v_d_test ; @llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 
12, i8 13, i8 14, i8 15>, align 16 @llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16 @@ -880,12 +880,12 @@ entry: ret void } -; CHECK: xor_v_b_test: -; CHECK: ld.b -; CHECK: ld.b -; CHECK: xor.v -; CHECK: st.b -; CHECK: .size xor_v_b_test +; ANYENDIAN: xor_v_b_test: +; ANYENDIAN: ld.b +; ANYENDIAN: ld.b +; ANYENDIAN: xor.v +; ANYENDIAN: st.b +; ANYENDIAN: .size xor_v_b_test ; define void @xor_v_h_test() nounwind { entry: @@ -896,12 +896,12 @@ entry: ret void } -; CHECK: xor_v_h_test: -; CHECK: ld.h -; CHECK: ld.h -; CHECK: xor.v -; CHECK: st.h -; CHECK: .size xor_v_h_test +; ANYENDIAN: xor_v_h_test: +; ANYENDIAN: ld.h +; ANYENDIAN: ld.h +; ANYENDIAN: xor.v +; ANYENDIAN: st.h +; ANYENDIAN: .size xor_v_h_test ; define void @xor_v_w_test() nounwind { @@ -913,12 +913,12 @@ entry: ret void } -; CHECK: xor_v_w_test: -; CHECK: ld.w -; CHECK: ld.w -; CHECK: xor.v -; CHECK: st.w -; CHECK: .size xor_v_w_test +; ANYENDIAN: xor_v_w_test: +; ANYENDIAN: ld.w +; ANYENDIAN: ld.w +; ANYENDIAN: xor.v +; ANYENDIAN: st.w +; ANYENDIAN: .size xor_v_w_test ; define void @xor_v_d_test() nounwind { @@ -930,12 +930,12 @@ entry: ret void } -; CHECK: xor_v_d_test: -; CHECK: ld.d -; CHECK: ld.d -; CHECK: xor.v -; CHECK: st.d -; CHECK: .size xor_v_d_test +; ANYENDIAN: xor_v_d_test: +; ANYENDIAN: ld.d +; ANYENDIAN: ld.d +; ANYENDIAN: xor.v +; ANYENDIAN: st.d +; ANYENDIAN: .size xor_v_d_test ; declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind |
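
The frameindex.ll additions above all probe the same encoding boundaries: MSA ld.df/st.df take a 10-bit signed immediate that is scaled by the element size, and addiu takes a 16-bit signed immediate, so the expected code changes as the $sp-relative offset of the vector slot stops fitting offset/element-size in [-512, 511] and then stops fitting 16 bits. The sketch below is only an illustrative model of that rule under those assumed encodings (the helper name and structure are not part of LLVM); it reproduces the boundaries the tests check.

# Illustrative model (assumed encodings, not LLVM source): MSA ld/st use a
# 10-bit signed immediate scaled by the element size, addiu a 16-bit signed
# immediate, and anything larger is materialized with ori/addu.
def msa_stack_addressing(offset, elem_size):
    """Classify how a $sp-relative vector slot is expected to be addressed.

    offset    -- byte offset of the slot from $sp
    elem_size -- 1, 2, 4 or 8 for .b/.h/.w/.d
    """
    if offset % elem_size == 0 and -512 <= offset // elem_size <= 511:
        return "direct"      # e.g. ld.h $w0, 1008($sp)
    if -32768 <= offset <= 32767:
        return "addiu base"  # e.g. addiu $1, $sp, 1024 ; ld.h $w0, 0($1)
    return "ori/addu base"   # e.g. ori $1, $zero, 32768 ; addu $1, $sp, $1

# The boundaries exercised above (element sizes 2, 4 and 8):
assert msa_stack_addressing(1008, 2) == "direct"
assert msa_stack_addressing(1024, 2) == "addiu base"
assert msa_stack_addressing(2032, 4) == "direct"
assert msa_stack_addressing(2048, 4) == "addiu base"
assert msa_stack_addressing(4080, 8) == "direct"
assert msa_stack_addressing(4096, 8) == "addiu base"
assert msa_stack_addressing(32768, 2) == "ori/addu base"

The *_unaligned tests fall into the base-register form for the same reason: an offset of 1 from $sp is not a multiple of the .h/.w/.d element size, so it cannot be expressed as a scaled immediate and the tests instead expect an addiu to form the base address.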