author | Daniel Sanders <daniel.sanders@imgtec.com> | 2013-09-24 14:02:15 +0000
committer | Daniel Sanders <daniel.sanders@imgtec.com> | 2013-09-24 14:02:15 +0000
commit | 7e0df9aa2966d0462e34511524a4958e226b74ee (patch)
tree | 6a8c5b2cc92494c83436fcc9576805366b30053f /test/CodeGen/Mips
parent | acfa5a203c01d99aac1bdc1e045c08153bcdbbf6 (diff)
[mips][msa] Added support for matching vshf from normal IR (i.e. not intrinsics)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191301 91177308-0d34-0410-b5e6-96231b3b80d8
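For illustration only (this function is not part of the commit; the name @illustrative_vshf and its signature are invented here), the snippet below shows the shape of "normal IR" the patch now matches: a shufflevector whose mask picks elements from both operands in an irregular order, which the new shuffle.ll test checks is selected to MSA vshf.b.

    ; Hypothetical example, mirroring the mask used by @vshf_v16i8_3 in the new
    ; shuffle.ll test. Indices 0-15 select from %a and 16-31 from %b, so no more
    ; specific MSA shuffle pattern applies and the generic vshf.b matching added
    ; by this commit is expected to kick in.
    define <16 x i8> @illustrative_vshf(<16 x i8> %a, <16 x i8> %b) nounwind {
      %1 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
      ret <16 x i8> %1
    }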
Diffstat (limited to 'test/CodeGen/Mips')
-rw-r--r-- | test/CodeGen/Mips/msa/3r-v.ll | 35
-rw-r--r-- | test/CodeGen/Mips/msa/shuffle.ll | 313 |
2 files changed, 336 insertions, 12 deletions
diff --git a/test/CodeGen/Mips/msa/3r-v.ll b/test/CodeGen/Mips/msa/3r-v.ll
index 055491d..544ae9f 100644
--- a/test/CodeGen/Mips/msa/3r-v.ll
+++ b/test/CodeGen/Mips/msa/3r-v.ll
@@ -5,84 +5,95 @@
 @llvm_mips_vshf_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
 @llvm_mips_vshf_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_vshf_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
 @llvm_mips_vshf_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

 define void @llvm_mips_vshf_b_test() nounwind {
 entry:
   %0 = load <16 x i8>* @llvm_mips_vshf_b_ARG1
   %1 = load <16 x i8>* @llvm_mips_vshf_b_ARG2
-  %2 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_vshf_b_RES
+  %2 = load <16 x i8>* @llvm_mips_vshf_b_ARG3
+  %3 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES
   ret void
 }

-declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind

 ; CHECK: llvm_mips_vshf_b_test:
 ; CHECK: ld.b
 ; CHECK: ld.b
+; CHECK: ld.b
 ; CHECK: vshf.b
 ; CHECK: st.b
 ; CHECK: .size llvm_mips_vshf_b_test
 ;
 @llvm_mips_vshf_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
 @llvm_mips_vshf_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_vshf_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
 @llvm_mips_vshf_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

 define void @llvm_mips_vshf_h_test() nounwind {
 entry:
   %0 = load <8 x i16>* @llvm_mips_vshf_h_ARG1
   %1 = load <8 x i16>* @llvm_mips_vshf_h_ARG2
-  %2 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_vshf_h_RES
+  %2 = load <8 x i16>* @llvm_mips_vshf_h_ARG3
+  %3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES
   ret void
 }

-declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>) nounwind
+declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind

 ; CHECK: llvm_mips_vshf_h_test:
 ; CHECK: ld.h
 ; CHECK: ld.h
+; CHECK: ld.h
 ; CHECK: vshf.h
 ; CHECK: st.h
 ; CHECK: .size llvm_mips_vshf_h_test
 ;
 @llvm_mips_vshf_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
 @llvm_mips_vshf_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_vshf_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
 @llvm_mips_vshf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

 define void @llvm_mips_vshf_w_test() nounwind {
 entry:
   %0 = load <4 x i32>* @llvm_mips_vshf_w_ARG1
   %1 = load <4 x i32>* @llvm_mips_vshf_w_ARG2
-  %2 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_vshf_w_RES
+  %2 = load <4 x i32>* @llvm_mips_vshf_w_ARG3
+  %3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES
   ret void
 }

-declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>) nounwind
+declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind

 ; CHECK: llvm_mips_vshf_w_test:
 ; CHECK: ld.w
 ; CHECK: ld.w
+; CHECK: ld.w
 ; CHECK: vshf.w
 ; CHECK: st.w
 ; CHECK: .size llvm_mips_vshf_w_test
 ;
 @llvm_mips_vshf_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
 @llvm_mips_vshf_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_vshf_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
 @llvm_mips_vshf_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

 define void @llvm_mips_vshf_d_test() nounwind {
 entry:
   %0 = load <2 x i64>* @llvm_mips_vshf_d_ARG1
   %1 = load <2 x i64>* @llvm_mips_vshf_d_ARG2
-  %2 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_vshf_d_RES
+  %2 = load <2 x i64>* @llvm_mips_vshf_d_ARG3
+  %3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+  store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES
   ret void
 }

-declare <2 x i64> @llvm.mips.vshf.d(<2 x i64>, <2 x i64>) nounwind
+declare <2 x i64> @llvm.mips.vshf.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind

 ; CHECK: llvm_mips_vshf_d_test:
 ; CHECK: ld.d
diff --git a/test/CodeGen/Mips/msa/shuffle.ll b/test/CodeGen/Mips/msa/shuffle.ll
new file mode 100644
index 0000000..35a5cf8
--- /dev/null
+++ b/test/CodeGen/Mips/msa/shuffle.ll
@@ -0,0 +1,313 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+  ; CHECK: vshf_v16i8_0:
+
+  %1 = load <16 x i8>* %a
+  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+  ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
+  store <16 x i8> %2, <16 x i8>* %c
+  ; CHECK-DAG: st.b [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v16i8_0
+}
+
+define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+  ; CHECK: vshf_v16i8_1:
+
+  %1 = load <16 x i8>* %a
+  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  ; CHECK-DAG: ldi.b [[R3:\$w[0-9]+]], 1
+  ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
+  store <16 x i8> %2, <16 x i8>* %c
+  ; CHECK-DAG: st.b [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v16i8_1
+}
+
+define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+  ; CHECK: vshf_v16i8_2:
+
+  %1 = load <16 x i8>* %a
+  %2 = load <16 x i8>* %b
+  ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+  %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16>
+  ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R2]]
+  store <16 x i8> %3, <16 x i8>* %c
+  ; CHECK-DAG: st.b [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v16i8_2
+}
+
+define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+  ; CHECK: vshf_v16i8_3:
+
+  %1 = load <16 x i8>* %a
+  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+  %2 = load <16 x i8>* %b
+  ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+  %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
+  ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R2]]
+  store <16 x i8> %3, <16 x i8>* %c
+  ; CHECK-DAG: st.b [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v16i8_3
+}
+
+define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+  ; CHECK: vshf_v16i8_4:
+
+  %1 = load <16 x i8>* %a
+  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17>
+  ; CHECK-DAG: ldi.b [[R3:\$w[0-9]+]], 1
+  ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
+  store <16 x i8> %2, <16 x i8>* %c
+  ; CHECK-DAG: st.b [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v16i8_4
+}
+
+define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+  ; CHECK: vshf_v8i16_0:
+
+  %1 = load <8 x i16>* %a
+  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+  ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
+  store <8 x i16> %2, <8 x i16>* %c
+  ; CHECK-DAG: st.h [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v8i16_0
+}
+
+define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+  ; CHECK: vshf_v8i16_1:
+
+  %1 = load <8 x i16>* %a
+  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+  ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
+  store <8 x i16> %2, <8 x i16>* %c
+  ; CHECK-DAG: st.h [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v8i16_1
+}
+
+define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+  ; CHECK: vshf_v8i16_2:
+
+  %1 = load <8 x i16>* %a
+  %2 = load <8 x i16>* %b
+  ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+  %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8>
+  ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R2]]
+  store <8 x i16> %3, <8 x i16>* %c
+  ; CHECK-DAG: st.h [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v8i16_2
+}
+
+define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+  ; CHECK: vshf_v8i16_3:
+
+  %1 = load <8 x i16>* %a
+  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+  %2 = load <8 x i16>* %b
+  ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+  %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
+  ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R2]]
+  store <8 x i16> %3, <8 x i16>* %c
+  ; CHECK-DAG: st.h [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v8i16_3
+}
+
+define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+  ; CHECK: vshf_v8i16_4:
+
+  %1 = load <8 x i16>* %a
+  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> <i32 1, i32 9, i32 1, i32 9, i32 1, i32 9, i32 1, i32 9>
+  ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+  ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
+  store <8 x i16> %2, <8 x i16>* %c
+  ; CHECK-DAG: st.h [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v8i16_4
+}
+
+define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+  ; CHECK: vshf_v4i32_0:
+
+  %1 = load <4 x i32>* %a
+  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
+  store <4 x i32> %2, <4 x i32>* %c
+  ; CHECK-DAG: st.w [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v4i32_0
+}
+
+define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+  ; CHECK: vshf_v4i32_1:
+
+  %1 = load <4 x i32>* %a
+  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+  ; CHECK-DAG: vshf.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
+  store <4 x i32> %2, <4 x i32>* %c
+  ; CHECK-DAG: st.w [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v4i32_1
+}
+
+define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+  ; CHECK: vshf_v4i32_2:
+
+  %1 = load <4 x i32>* %a
+  %2 = load <4 x i32>* %b
+  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+  %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 5, i32 6, i32 4>
+  ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+  store <4 x i32> %3, <4 x i32>* %c
+  ; CHECK-DAG: st.w [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v4i32_2
+}
+
+define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+  ; CHECK: vshf_v4i32_3:
+
+  %1 = load <4 x i32>* %a
+  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+  %2 = load <4 x i32>* %b
+  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+  %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
+  ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.w [[R3]], [[R1]], [[R2]]
+  store <4 x i32> %3, <4 x i32>* %c
+  ; CHECK-DAG: st.w [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v4i32_3
+}
+
+define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+  ; CHECK: vshf_v4i32_4:
+
+  %1 = load <4 x i32>* %a
+  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 5, i32 5, i32 1>
+  ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+  ; CHECK-DAG: vshf.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
+  store <4 x i32> %2, <4 x i32>* %c
+  ; CHECK-DAG: st.w [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v4i32_4
+}
+
+define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+  ; CHECK: vshf_v2i64_0:
+
+  %1 = load <2 x i64>* %a
+  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+  ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
+  store <2 x i64> %2, <2 x i64>* %c
+  ; CHECK-DAG: st.d [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v2i64_0
+}
+
+define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+  ; CHECK: vshf_v2i64_1:
+
+  %1 = load <2 x i64>* %a
+  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+  ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+  ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
+  store <2 x i64> %2, <2 x i64>* %c
+  ; CHECK-DAG: st.d [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v2i64_1
+}
+
+define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+  ; CHECK: vshf_v2i64_2:
+
+  %1 = load <2 x i64>* %a
+  %2 = load <2 x i64>* %b
+  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+  %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2>
+  ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R2]]
+  store <2 x i64> %3, <2 x i64>* %c
+  ; CHECK-DAG: st.d [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v2i64_2
+}
+
+define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+  ; CHECK: vshf_v2i64_3:
+
+  %1 = load <2 x i64>* %a
+  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+  %2 = load <2 x i64>* %b
+  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+  %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2>
+  ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+  ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R2]]
+  store <2 x i64> %3, <2 x i64>* %c
+  ; CHECK-DAG: st.d [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v2i64_3
+}
+
+define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+  ; CHECK: vshf_v2i64_4:
+
+  %1 = load <2 x i64>* %a
+  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+  %2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> <i32 1, i32 3>
+  ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+  ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
+  store <2 x i64> %2, <2 x i64>* %c
+  ; CHECK-DAG: st.d [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size vshf_v2i64_4
+}