author     Hao Liu <Hao.Liu@arm.com>    2013-08-15 08:26:11 +0000
committer  Hao Liu <Hao.Liu@arm.com>    2013-08-15 08:26:11 +0000
commit     d9767021f83879429e930b068d1d6aef22285b33 (patch)
tree       93c99311855843ce9f66f9990626667bbc9be5ab /test/CodeGen/AArch64
parent     46ceaf4ba64cdd0ac37578c0132cad39c9ea21c0 (diff)
Clang and AArch64 backend patches to support shll/shl and vmovl instructions and ACLE functions
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188451 91177308-0d34-0410-b5e6-96231b3b80d8
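
For context, the ACLE functions mentioned in the message reach the backend as the IR patterns tested below: a widening vshll_n_* call becomes a sext/zext followed by a vector shl, and the extend-only vmovl_* forms become a plain sext/zext, which the backend is expected to match as SSHLL/USHLL with a #0 shift amount. A minimal sketch of such user code follows (hypothetical example, not part of this commit; it only assumes the standard <arm_neon.h> intrinsics vshll_n_s8 and vmovl_u16):

#include <arm_neon.h>

/* Hypothetical example, not part of this commit. */
int16x8_t widen_and_scale(int8x8_t a) {
  /* sign-extend i8 -> i16, then shift left by 3; expected to select sshll v.8h, v.8b, #3 */
  return vshll_n_s8(a, 3);
}

uint32x4_t widen_only(uint16x4_t a) {
  /* zero-extend i16 -> i32 with no shift; expected to select ushll v.4s, v.4h, #0 */
  return vmovl_u16(a);
}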
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--   test/CodeGen/AArch64/neon-shift-left-long.ll   193
-rw-r--r--   test/CodeGen/AArch64/neon-shift.ll              48
2 files changed, 241 insertions, 0 deletions
diff --git a/test/CodeGen/AArch64/neon-shift-left-long.ll b/test/CodeGen/AArch64/neon-shift-left-long.ll
new file mode 100644
index 0000000..d45c476
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-shift-left-long.ll
@@ -0,0 +1,193 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i16> @test_sshll_v8i8(<8 x i8> %a) {
+; CHECK: test_sshll_v8i8:
+; CHECK: sshll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #3
+  %1 = sext <8 x i8> %a to <8 x i16>
+  %tmp = shl <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_sshll_v4i16(<4 x i16> %a) {
+; CHECK: test_sshll_v4i16:
+; CHECK: sshll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #9
+  %1 = sext <4 x i16> %a to <4 x i32>
+  %tmp = shl <4 x i32> %1, <i32 9, i32 9, i32 9, i32 9>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_sshll_v2i32(<2 x i32> %a) {
+; CHECK: test_sshll_v2i32:
+; CHECK: sshll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #19
+  %1 = sext <2 x i32> %a to <2 x i64>
+  %tmp = shl <2 x i64> %1, <i64 19, i64 19>
+  ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_ushll_v8i8(<8 x i8> %a) {
+; CHECK: test_ushll_v8i8:
+; CHECK: ushll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #3
+  %1 = zext <8 x i8> %a to <8 x i16>
+  %tmp = shl <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_ushll_v4i16(<4 x i16> %a) {
+; CHECK: test_ushll_v4i16:
+; CHECK: ushll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #9
+  %1 = zext <4 x i16> %a to <4 x i32>
+  %tmp = shl <4 x i32> %1, <i32 9, i32 9, i32 9, i32 9>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_ushll_v2i32(<2 x i32> %a) {
+; CHECK: test_ushll_v2i32:
+; CHECK: ushll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #19
+  %1 = zext <2 x i32> %a to <2 x i64>
+  %tmp = shl <2 x i64> %1, <i64 19, i64 19>
+  ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_sshll2_v16i8(<16 x i8> %a) {
+; CHECK: test_sshll2_v16i8:
+; CHECK: sshll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #3
+  %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %2 = sext <8 x i8> %1 to <8 x i16>
+  %tmp = shl <8 x i16> %2, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_sshll2_v8i16(<8 x i16> %a) {
+; CHECK: test_sshll2_v8i16:
+; CHECK: sshll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #9
+  %1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %2 = sext <4 x i16> %1 to <4 x i32>
+  %tmp = shl <4 x i32> %2, <i32 9, i32 9, i32 9, i32 9>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_sshll2_v4i32(<4 x i32> %a) {
+; CHECK: test_sshll2_v4i32:
+; CHECK: sshll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #19
+  %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %2 = sext <2 x i32> %1 to <2 x i64>
+  %tmp = shl <2 x i64> %2, <i64 19, i64 19>
+  ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_ushll2_v16i8(<16 x i8> %a) {
+; CHECK: test_ushll2_v16i8:
+; CHECK: ushll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #3
+  %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %2 = zext <8 x i8> %1 to <8 x i16>
+  %tmp = shl <8 x i16> %2, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_ushll2_v8i16(<8 x i16> %a) {
+; CHECK: test_ushll2_v8i16:
+; CHECK: ushll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #9
+  %1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %2 = zext <4 x i16> %1 to <4 x i32>
+  %tmp = shl <4 x i32> %2, <i32 9, i32 9, i32 9, i32 9>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_ushll2_v4i32(<4 x i32> %a) {
+; CHECK: test_ushll2_v4i32:
+; CHECK: ushll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #19
+  %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %2 = zext <2 x i32> %1 to <2 x i64>
+  %tmp = shl <2 x i64> %2, <i64 19, i64 19>
+  ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_sshll_shl0_v8i8(<8 x i8> %a) {
+; CHECK: test_sshll_shl0_v8i8:
+; CHECK: sshll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #0
+  %tmp = sext <8 x i8> %a to <8 x i16>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_sshll_shl0_v4i16(<4 x i16> %a) {
+; CHECK: test_sshll_shl0_v4i16:
+; CHECK: sshll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #0
+  %tmp = sext <4 x i16> %a to <4 x i32>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_sshll_shl0_v2i32(<2 x i32> %a) {
+; CHECK: test_sshll_shl0_v2i32:
+; CHECK: sshll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #0
+  %tmp = sext <2 x i32> %a to <2 x i64>
+  ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_ushll_shl0_v8i8(<8 x i8> %a) {
+; CHECK: test_ushll_shl0_v8i8:
+; CHECK: ushll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #0
+  %tmp = zext <8 x i8> %a to <8 x i16>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_ushll_shl0_v4i16(<4 x i16> %a) {
+; CHECK: test_ushll_shl0_v4i16:
+; CHECK: ushll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #0
+  %tmp = zext <4 x i16> %a to <4 x i32>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_ushll_shl0_v2i32(<2 x i32> %a) {
+; CHECK: test_ushll_shl0_v2i32:
+; CHECK: ushll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #0
+  %tmp = zext <2 x i32> %a to <2 x i64>
+  ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_sshll2_shl0_v16i8(<16 x i8> %a) {
+; CHECK: test_sshll2_shl0_v16i8:
+; CHECK: sshll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #0
+  %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %tmp = sext <8 x i8> %1 to <8 x i16>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_sshll2_shl0_v8i16(<8 x i16> %a) {
+; CHECK: test_sshll2_shl0_v8i16:
+; CHECK: sshll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #0
+  %1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %tmp = sext <4 x i16> %1 to <4 x i32>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_sshll2_shl0_v4i32(<4 x i32> %a) {
+; CHECK: test_sshll2_shl0_v4i32:
+; CHECK: sshll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #0
+  %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %tmp = sext <2 x i32> %1 to <2 x i64>
+  ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_ushll2_shl0_v16i8(<16 x i8> %a) {
+; CHECK: test_ushll2_shl0_v16i8:
+; CHECK: ushll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #0
+  %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %tmp = zext <8 x i8> %1 to <8 x i16>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_ushll2_shl0_v8i16(<8 x i16> %a) {
+; CHECK: test_ushll2_shl0_v8i16:
+; CHECK: ushll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #0
+  %1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %tmp = zext <4 x i16> %1 to <4 x i32>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_ushll2_shl0_v4i32(<4 x i32> %a) {
+; CHECK: test_ushll2_shl0_v4i32:
+; CHECK: ushll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #0
+  %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %tmp = zext <2 x i32> %1 to <2 x i64>
+  ret <2 x i64> %tmp
+}
diff --git a/test/CodeGen/AArch64/neon-shift.ll b/test/CodeGen/AArch64/neon-shift.ll
index 45a2605..9b11ba8 100644
--- a/test/CodeGen/AArch64/neon-shift.ll
+++ b/test/CodeGen/AArch64/neon-shift.ll
@@ -137,4 +137,52 @@ define <2 x i64> @test_sshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
 }
 
+define <8 x i8> @test_shl_v8i8(<8 x i8> %a) {
+; CHECK: test_shl_v8i8:
+; CHECK: shl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+  %tmp = shl <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+  ret <8 x i8> %tmp
+}
+
+define <4 x i16> @test_shl_v4i16(<4 x i16> %a) {
+; CHECK: test_shl_v4i16:
+; CHECK: shl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+  %tmp = shl <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
+  ret <4 x i16> %tmp
+}
+
+define <2 x i32> @test_shl_v2i32(<2 x i32> %a) {
+; CHECK: test_shl_v2i32:
+; CHECK: shl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+  %tmp = shl <2 x i32> %a, <i32 3, i32 3>
+  ret <2 x i32> %tmp
+}
+
+define <16 x i8> @test_shl_v16i8(<16 x i8> %a) {
+; CHECK: test_shl_v16i8:
+; CHECK: shl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+  %tmp = shl <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+  ret <16 x i8> %tmp
+}
+
+define <8 x i16> @test_shl_v8i16(<8 x i16> %a) {
+; CHECK: test_shl_v8i16:
+; CHECK: shl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+  %tmp = shl <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_shl_v4i32(<4 x i32> %a) {
+; CHECK: test_shl_v4i32:
+; CHECK: shl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+  %tmp = shl <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
+  ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_shl_v2i64(<2 x i64> %a) {
+; CHECK: test_shl_v2i64:
+; CHECK: shl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+  %tmp = shl <2 x i64> %a, <i64 3, i64 3>
+  ret <2 x i64> %tmp
+}