author    Chad Rosier <mcrosier@codeaurora.org>    2013-11-11 19:11:11 +0000
committer Chad Rosier <mcrosier@codeaurora.org>    2013-11-11 19:11:11 +0000
commit    4c433cf673199528b601f664be3d4c121991a7e2 (patch)
tree      cf7c194d7c734c7abc43943a7795ef22e37e7278 /test/CodeGen/AArch64
parent    095f994ba63994e8eb4b77127f9b872429496dba (diff)
[AArch64] The shift right/left and insert immediate builtins expect three
source operands: a vector, an element to insert, and a shift amount.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194406 91177308-0d34-0410-b5e6-96231b3b80d8
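For reference, the corresponding ACLE scalar intrinsics take the same three
source operands. A minimal sketch of the C-level usage, assuming an AArch64
target with arm_neon.h (the wrapper names shift_right_insert and
shift_left_insert are illustrative, not part of this patch):

    #include <arm_neon.h>
    #include <stdint.h>

    /* SRI: shift b right by 63 and insert into a, preserving the
       top 63 bits of a. vsrid_n_s64 is the ACLE intrinsic. */
    int64_t shift_right_insert(int64_t a, int64_t b) {
        return vsrid_n_s64(a, b, 63);
    }

    /* SLI: shift b left by 63 and insert into a, preserving the
       low 63 bits of a. vslid_n_s64 is the ACLE intrinsic. */
    int64_t shift_left_insert(int64_t a, int64_t b) {
        return vslid_n_s64(a, b, 63);
    }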
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-shift-imm.ll  32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/test/CodeGen/AArch64/neon-scalar-shift-imm.ll b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
index b11540f..7e099a3 100644
--- a/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
+++ b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
@@ -266,47 +266,51 @@ entry:
declare <1 x i64> @llvm.aarch64.neon.vqshlus.n.v1i64(<1 x i64>, i32)
-define i64 @test_vsrid_n_s64(i64 %a) {
+define i64 @test_vsrid_n_s64(i64 %a, i64 %b) {
; CHECK: test_vsrid_n_s64
; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
entry:
%vsri = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsri1 = call <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64> %vsri, i32 63)
- %0 = extractelement <1 x i64> %vsri1, i32 0
+ %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
+ %0 = extractelement <1 x i64> %vsri2, i32 0
ret i64 %0
}
-declare <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64>, <1 x i64>, i32)
-define i64 @test_vsrid_n_u64(i64 %a) {
+define i64 @test_vsrid_n_u64(i64 %a, i64 %b) {
; CHECK: test_vsrid_n_u64
; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
entry:
%vsri = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsri1 = call <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64> %vsri, i32 63)
- %0 = extractelement <1 x i64> %vsri1, i32 0
+ %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
+ %0 = extractelement <1 x i64> %vsri2, i32 0
ret i64 %0
}
-define i64 @test_vslid_n_s64(i64 %a) {
+define i64 @test_vslid_n_s64(i64 %a, i64 %b) {
; CHECK: test_vslid_n_s64
; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
entry:
%vsli = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsli1 = call <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64> %vsli, i32 63)
- %0 = extractelement <1 x i64> %vsli1, i32 0
+ %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsli2 = call <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
+ %0 = extractelement <1 x i64> %vsli2, i32 0
ret i64 %0
}
-declare <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64>, <1 x i64>, i32)
-define i64 @test_vslid_n_u64(i64 %a) {
+define i64 @test_vslid_n_u64(i64 %a, i64 %b) {
; CHECK: test_vslid_n_u64
; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
entry:
%vsli = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsli1 = call <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64> %vsli, i32 63)
- %0 = extractelement <1 x i64> %vsli1, i32 0
+ %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsli2 = call <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
+ %0 = extractelement <1 x i64> %vsli2, i32 0
ret i64 %0
}
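
For context, the insert semantics that make the third source operand necessary
can be modeled in plain C as follows. This is a reference sketch of the SRI/SLI
bit manipulation, not code from this patch (the helper names sri64 and sli64
are illustrative):

    #include <stdint.h>

    /* SRI reference: keep the top n bits of a, insert b >> n (n in 1..64).
       The split shift avoids undefined behavior when n == 64. */
    uint64_t sri64(uint64_t a, uint64_t b, unsigned n) {
        uint64_t mask = (UINT64_MAX >> (n - 1)) >> 1; /* bits written by the shift */
        return (a & ~mask) | ((b >> (n - 1)) >> 1);
    }

    /* SLI reference: keep the low n bits of a, insert b << n (n in 0..63). */
    uint64_t sli64(uint64_t a, uint64_t b, unsigned n) {
        uint64_t mask = UINT64_MAX << n;              /* bits written by the shift */
        return (a & ~mask) | (b << n);
    }

Because the destination register is both read and written by SRI/SLI, the IR
intrinsic needs the original vector as well as the operand being shifted,
which is exactly the extra operand this patch threads through the tests.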