| author | Daniel Sanders <daniel.sanders@imgtec.com> | 2013-09-23 14:29:55 +0000 |
| --- | --- | --- |
| committer | Daniel Sanders <daniel.sanders@imgtec.com> | 2013-09-23 14:29:55 +0000 |
| commit | e0187e51a17f2081d6a72a57e0fbba8ce38d9410 (patch) | |
| tree | 53c1a00d7d3ee2183eec49395d367e07950e6265 /test/CodeGen | |
| parent | 0f22c134be40a337b30e30bdafb9e8b6880dea1e (diff) | |
[mips][msa] Added support for matching addvi and subvi from normal IR (i.e. not intrinsics)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191203 91177308-0d34-0410-b5e6-96231b3b80d8
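For context, the pattern the new tests exercise is a vector `add`/`sub` whose second operand is a constant splat of a small immediate (here 1); with this change the MSA backend selects the immediate forms (`addvi.b/h/w/d`, `subvi.b/h/w/d`) for such plain IR instead of requiring the corresponding `llvm.mips.addvi.*`/`llvm.mips.subvi.*` intrinsics. A minimal sketch of that kind of input is below; the function name and the `llc` invocation in the comment are assumptions for illustration (the test file's own RUN line lies outside the hunks shown), not part of the commit.

```llvm
; Hypothetical reduced example (not from the commit). Assumed invocation:
;   llc -march=mips -mattr=+msa < splat-imm.ll
; With this patch, the splat-of-1 operand below should be selected as a
; single addvi.w instead of materialising the constant for addv.w.
define void @splat_add_imm(<4 x i32>* %c, <4 x i32>* %a) nounwind {
  %v = load <4 x i32>* %a
  %r = add <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>
  store <4 x i32> %r, <4 x i32>* %c
  ret void
}
```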
Diffstat (limited to 'test/CodeGen')
-rw-r--r-- | test/CodeGen/Mips/msa/arithmetic.ll | 113 |
1 file changed, 113 insertions, 0 deletions
```diff
diff --git a/test/CodeGen/Mips/msa/arithmetic.ll b/test/CodeGen/Mips/msa/arithmetic.ll
index 303778f..d695f12 100644
--- a/test/CodeGen/Mips/msa/arithmetic.ll
+++ b/test/CodeGen/Mips/msa/arithmetic.ll
@@ -63,6 +63,63 @@ define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ret void
   ; CHECK: .size add_v2i64
 }
+
+define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+  ; CHECK: add_v16i8_i:
+
+  %1 = load <16 x i8>* %a
+  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+  %2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ; CHECK-DAG: addvi.b [[R3:\$w[0-9]+]], [[R1]], 1
+  store <16 x i8> %2, <16 x i8>* %c
+  ; CHECK-DAG: st.b [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size add_v16i8_i
+}
+
+define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+  ; CHECK: add_v8i16_i:
+
+  %1 = load <8 x i16>* %a
+  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+  %2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ; CHECK-DAG: addvi.h [[R3:\$w[0-9]+]], [[R1]], 1
+  store <8 x i16> %2, <8 x i16>* %c
+  ; CHECK-DAG: st.h [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size add_v8i16_i
+}
+
+define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+  ; CHECK: add_v4i32_i:
+
+  %1 = load <4 x i32>* %a
+  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+  %2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+  ; CHECK-DAG: addvi.w [[R3:\$w[0-9]+]], [[R1]], 1
+  store <4 x i32> %2, <4 x i32>* %c
+  ; CHECK-DAG: st.w [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size add_v4i32_i
+}
+
+define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+  ; CHECK: add_v2i64_i:
+
+  %1 = load <2 x i64>* %a
+  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+  %2 = add <2 x i64> %1, <i64 1, i64 1>
+  ; CHECK-DAG: addvi.d [[R3:\$w[0-9]+]], [[R1]], 1
+  store <2 x i64> %2, <2 x i64>* %c
+  ; CHECK-DAG: st.d [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size add_v2i64_i
+}
+
 define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: sub_v16i8:
@@ -127,6 +184,62 @@ define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; CHECK: .size sub_v2i64
 }
+define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+  ; CHECK: sub_v16i8_i:
+
+  %1 = load <16 x i8>* %a
+  ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+  %2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ; CHECK-DAG: subvi.b [[R3:\$w[0-9]+]], [[R1]], 1
+  store <16 x i8> %2, <16 x i8>* %c
+  ; CHECK-DAG: st.b [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size sub_v16i8_i
+}
+
+define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+  ; CHECK: sub_v8i16_i:
+
+  %1 = load <8 x i16>* %a
+  ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+  %2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ; CHECK-DAG: subvi.h [[R3:\$w[0-9]+]], [[R1]], 1
+  store <8 x i16> %2, <8 x i16>* %c
+  ; CHECK-DAG: st.h [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size sub_v8i16_i
+}
+
+define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+  ; CHECK: sub_v4i32_i:
+
+  %1 = load <4 x i32>* %a
+  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+  %2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+  ; CHECK-DAG: subvi.w [[R3:\$w[0-9]+]], [[R1]], 1
+  store <4 x i32> %2, <4 x i32>* %c
+  ; CHECK-DAG: st.w [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size sub_v4i32_i
+}
+
+define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+  ; CHECK: sub_v2i64_i:
+
+  %1 = load <2 x i64>* %a
+  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+  %2 = sub <2 x i64> %1, <i64 1, i64 1>
+  ; CHECK-DAG: subvi.d [[R3:\$w[0-9]+]], [[R1]], 1
+  store <2 x i64> %2, <2 x i64>* %c
+  ; CHECK-DAG: st.d [[R3]], 0($4)
+
+  ret void
+  ; CHECK: .size sub_v2i64_i
+}
+
 define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: mul_v16i8:
```