author:    Hao Liu <Hao.Liu@arm.com>  2013-10-10 15:01:24 +0000
committer: Hao Liu <Hao.Liu@arm.com>  2013-10-10 15:01:24 +0000
commit:    d622bef31d11a5a6429fe7fad557c9b111e96f69 (patch)
tree:      9717677e5d819174ae2e85d817161d320b55dc70 /test
parent:    8ccf2b3c9e0f70220c88f3328ddebebd7866f92c (diff)
Implement AArch64 vector load/store multiple N-element structure class SIMD(lselem).
Including the following 14 instructions:
4 ld1 insts: load multiple 1-element structures to 1/2/3/4 sequential registers.
ld2/ld3/ld4: load multiple N-element structures to N sequential registers (N = 2, 3, 4).
4 st1 insts: store multiple 1-element structures from 1/2/3/4 sequential registers.
st2/st3/st4: store multiple N-element structures from N sequential registers (N = 2, 3, 4).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192352 91177308-0d34-0410-b5e6-96231b3b80d8
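
For reference, a minimal sketch of what these instructions look like at the IR level, modeled on the new CodeGen tests below (the function name @ld2_example is illustrative; the intrinsic, operand types, and expected ld2 codegen are taken from the added test file):

; An ld2 load de-interleaves 2x4 i32 elements from memory into two
; NEON registers; the intrinsic returns both vectors as an aggregate.
define { <4 x i32>, <4 x i32> } @ld2_example(i32* readonly %a) {
  %p = bitcast i32* %a to i8*
  ; Expected codegen: ld2 {vA.4s, vB.4s}, [x0]
  %vld2 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8* %p, i32 4)
  ret { <4 x i32>, <4 x i32> } %vld2
}
declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8*, i32)

ld3/ld4 and the st2/st3/st4 stores follow the same shape, with three- and four-element aggregates and the matching vst intrinsics.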
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll  1228
-rw-r--r--  test/MC/AArch64/neon-diagnostics.s                  221
-rw-r--r--  test/MC/AArch64/neon-simd-ldst-multi-elem.s         463
3 files changed, 1912 insertions, 0 deletions
diff --git a/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll b/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll
new file mode 100644
index 0000000..4cd76bc
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll
@@ -0,0 +1,1228 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+%struct.int8x16x2_t = type { [2 x <16 x i8>] }
+%struct.int16x8x2_t = type { [2 x <8 x i16>] }
+%struct.int32x4x2_t = type { [2 x <4 x i32>] }
+%struct.int64x2x2_t = type { [2 x <2 x i64>] }
+%struct.float32x4x2_t = type { [2 x <4 x float>] }
+%struct.float64x2x2_t = type { [2 x <2 x double>] }
+%struct.int8x8x2_t = type { [2 x <8 x i8>] }
+%struct.int16x4x2_t = type { [2 x <4 x i16>] }
+%struct.int32x2x2_t = type { [2 x <2 x i32>] }
+%struct.int64x1x2_t = type { [2 x <1 x i64>] }
+%struct.float32x2x2_t = type { [2 x <2 x float>] }
+%struct.float64x1x2_t = type { [2 x <1 x double>] }
+%struct.int8x16x3_t = type { [3 x <16 x i8>] }
+%struct.int16x8x3_t = type { [3 x <8 x i16>] }
+%struct.int32x4x3_t = type { [3 x <4 x i32>] }
+%struct.int64x2x3_t = type { [3 x <2 x i64>] }
+%struct.float32x4x3_t = type { [3 x <4 x float>] }
+%struct.float64x2x3_t = type { [3 x <2 x double>] }
+%struct.int8x8x3_t = type { [3 x <8 x i8>] }
+%struct.int16x4x3_t = type { [3 x <4 x i16>] }
+%struct.int32x2x3_t = type { [3 x <2 x i32>] }
+%struct.int64x1x3_t = type { [3 x <1 x i64>] }
+%struct.float32x2x3_t = type { [3 x <2 x float>] }
+%struct.float64x1x3_t = type { [3 x <1 x double>] }
+%struct.int8x16x4_t = type { [4 x <16 x i8>] }
+%struct.int16x8x4_t = type { [4 x <8 x i16>] }
+%struct.int32x4x4_t = type { [4 x <4 x i32>] }
+%struct.int64x2x4_t = type { [4 x <2 x i64>] }
+%struct.float32x4x4_t = type { [4 x <4 x float>] }
+%struct.float64x2x4_t = type { [4 x <2 x double>] }
+%struct.int8x8x4_t = type { [4 x <8 x i8>] }
+%struct.int16x4x4_t = type { [4 x <4 x i16>] }
+%struct.int32x2x4_t = type { [4 x <2 x i32>] }
+%struct.int64x1x4_t = type { [4 x <1 x i64>] }
+%struct.float32x2x4_t = type { [4 x <2 x float>] }
+%struct.float64x1x4_t = type { [4 x <1 x double>] }
+
+
+define <16 x i8> @test_vld1q_s8(i8* readonly %a) {
+; CHECK: test_vld1q_s8
+; CHECK: ld1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+  %vld1 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %a, i32 1)
+  ret <16 x i8> %vld1
+}
+
+define <8 x i16> @test_vld1q_s16(i16* readonly %a) {
+; CHECK: test_vld1q_s16
+; CHECK: ld1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld1 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %1, i32 2)
+  ret <8 x i16> %vld1
+}
+
+define <4 x i32> @test_vld1q_s32(i32* readonly %a) {
+; CHECK: test_vld1q_s32
+; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %vld1 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %1, i32 4)
+  ret <4 x i32> %vld1
+}
+
+define <2 x i64> @test_vld1q_s64(i64* readonly %a) {
+; CHECK: test_vld1q_s64
+; CHECK: ld1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %vld1 = tail call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %1, i32 8)
+  ret <2 x i64> %vld1
+}
+
+define <4 x float> @test_vld1q_f32(float* readonly %a) {
+; CHECK: test_vld1q_f32
+; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %1, i32 4)
+  ret <4 x float> %vld1
+}
+
+define <2 x double> @test_vld1q_f64(double* readonly %a) {
+; CHECK: test_vld1q_f64
+; CHECK: ld1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %vld1 = tail call <2 x double> @llvm.arm.neon.vld1.v2f64(i8* %1, i32 8)
+  ret <2 x double> %vld1
+}
+
+define <8 x i8> @test_vld1_s8(i8* readonly %a) {
+; CHECK: test_vld1_s8
+; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+  %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1)
+  ret <8 x i8> %vld1
+}
+
+define <4 x i16> @test_vld1_s16(i16* readonly %a) {
+; CHECK: test_vld1_s16
+; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2)
+  ret <4 x i16> %vld1
+}
+
+define <2 x i32> @test_vld1_s32(i32* readonly %a) {
+; CHECK: test_vld1_s32
+; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %vld1 = tail call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %1, i32 4)
+  ret <2 x i32> %vld1
+}
+
+define <1 x i64> @test_vld1_s64(i64* readonly %a) {
+; CHECK: test_vld1_s64
+; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %vld1 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %1, i32 8)
+  ret <1 x i64> %vld1
+}
+
+define <2 x float> @test_vld1_f32(float* readonly %a) {
+; CHECK: test_vld1_f32
+; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %vld1 = tail call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %1, i32 4)
+  ret <2 x float> %vld1
+}
+
+define <1 x double> @test_vld1_f64(double* readonly %a) {
+; CHECK: test_vld1_f64
+; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %vld1 = tail call <1 x double> @llvm.arm.neon.vld1.v1f64(i8* %1, i32 8)
+  ret <1 x double> %vld1
+}
+
+define <8 x i8> @test_vld1_p8(i8* readonly %a) {
+; CHECK: test_vld1_p8
+; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+  %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1)
+  ret <8 x i8> %vld1
+}
+
+define <4 x i16> @test_vld1_p16(i16* readonly %a) {
+; CHECK: test_vld1_p16
+; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2)
+  ret <4 x i16> %vld1
+}
+
+define %struct.int8x16x2_t @test_vld2q_s8(i8* readonly %a) {
+; CHECK: test_vld2q_s8
+; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+  %vld2 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %a, i32 1)
+  %vld2.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %vld2.fca.1.extract, 0, 1
+  ret %struct.int8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x8x2_t @test_vld2q_s16(i16* readonly %a) {
+; CHECK: test_vld2q_s16
+; CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld2 = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2.v8i16(i8* %1, i32 2)
+  %vld2.fca.0.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %vld2.fca.1.extract, 0, 1
+  ret %struct.int16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x4x2_t @test_vld2q_s32(i32* readonly %a) {
+; CHECK: test_vld2q_s32
+; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %vld2 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8* %1, i32 4)
+  %vld2.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %vld2.fca.1.extract, 0, 1
+  ret %struct.int32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int64x2x2_t @test_vld2q_s64(i64* readonly %a) {
+; CHECK: test_vld2q_s64
+; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %vld2 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2.v2i64(i8* %1, i32 8)
+  %vld2.fca.0.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.int64x2x2_t undef, <2 x i64> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int64x2x2_t %.fca.0.0.insert, <2 x i64> %vld2.fca.1.extract, 0, 1
+  ret %struct.int64x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x4x2_t @test_vld2q_f32(float* readonly %a) {
+; CHECK: test_vld2q_f32
+; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
+  %vld2.fca.0.extract = extractvalue { <4 x float>, <4 x float> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <4 x float>, <4 x float> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %vld2.fca.1.extract, 0, 1
+  ret %struct.float32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.float64x2x2_t @test_vld2q_f64(double* readonly %a) {
+; CHECK: test_vld2q_f64
+; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %vld2 = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2.v2f64(i8* %1, i32 8)
+  %vld2.fca.0.extract = extractvalue { <2 x double>, <2 x double> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <2 x double>, <2 x double> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.float64x2x2_t undef, <2 x double> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float64x2x2_t %.fca.0.0.insert, <2 x double> %vld2.fca.1.extract, 0, 1
+  ret %struct.float64x2x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x8x2_t @test_vld2_s8(i8* readonly %a) {
+; CHECK: test_vld2_s8
+; CHECK: ld2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+  %vld2 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8* %a, i32 1)
+  %vld2.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %vld2.fca.1.extract, 0, 1
+  ret %struct.int8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x4x2_t @test_vld2_s16(i16* readonly %a) {
+; CHECK: test_vld2_s16
+; CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld2 = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2.v4i16(i8* %1, i32 2)
+  %vld2.fca.0.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %vld2.fca.1.extract, 0, 1
+  ret %struct.int16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x2x2_t @test_vld2_s32(i32* readonly %a) {
+; CHECK: test_vld2_s32
+; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %vld2 = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8* %1, i32 4)
+  %vld2.fca.0.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %vld2.fca.1.extract, 0, 1
+  ret %struct.int32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.int64x1x2_t @test_vld2_s64(i64* readonly %a) {
+; CHECK: test_vld2_s64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %vld2 = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %1, i32 8)
+  %vld2.fca.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.int64x1x2_t undef, <1 x i64> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int64x1x2_t %.fca.0.0.insert, <1 x i64> %vld2.fca.1.extract, 0, 1
+  ret %struct.int64x1x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x2x2_t @test_vld2_f32(float* readonly %a) {
+; CHECK: test_vld2_f32
+; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %vld2 = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %1, i32 4)
+  %vld2.fca.0.extract = extractvalue { <2 x float>, <2 x float> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <2 x float>, <2 x float> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %vld2.fca.1.extract, 0, 1
+  ret %struct.float32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float64x1x2_t @test_vld2_f64(double* readonly %a) {
+; CHECK: test_vld2_f64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %vld2 = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8* %1, i32 8)
+  %vld2.fca.0.extract = extractvalue { <1 x double>, <1 x double> } %vld2, 0
+  %vld2.fca.1.extract = extractvalue { <1 x double>, <1 x double> } %vld2, 1
+  %.fca.0.0.insert = insertvalue %struct.float64x1x2_t undef, <1 x double> %vld2.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float64x1x2_t %.fca.0.0.insert, <1 x double> %vld2.fca.1.extract, 0, 1
+  ret %struct.float64x1x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x16x3_t @test_vld3q_s8(i8* readonly %a) {
+; CHECK: test_vld3q_s8
+; CHECK: ld3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+  %vld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %a, i32 1)
+  %vld3.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.int8x16x3_t undef, <16 x i8> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int8x16x3_t %.fca.0.0.insert, <16 x i8> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int8x16x3_t %.fca.0.1.insert, <16 x i8> %vld3.fca.2.extract, 0, 2
+  ret %struct.int8x16x3_t %.fca.0.2.insert
+}
+
+define %struct.int16x8x3_t @test_vld3q_s16(i16* readonly %a) {
+; CHECK: test_vld3q_s16
+; CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3.v8i16(i8* %1, i32 2)
+  %vld3.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.int16x8x3_t undef, <8 x i16> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int16x8x3_t %.fca.0.0.insert, <8 x i16> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int16x8x3_t %.fca.0.1.insert, <8 x i16> %vld3.fca.2.extract, 0, 2
+  ret %struct.int16x8x3_t %.fca.0.2.insert
+}
+
+define %struct.int32x4x3_t @test_vld3q_s32(i32* readonly %a) {
+; CHECK: test_vld3q_s32
+; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %vld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %1, i32 4)
+  %vld3.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.int32x4x3_t undef, <4 x i32> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int32x4x3_t %.fca.0.0.insert, <4 x i32> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int32x4x3_t %.fca.0.1.insert, <4 x i32> %vld3.fca.2.extract, 0, 2
+  ret %struct.int32x4x3_t %.fca.0.2.insert
+}
+
+define %struct.int64x2x3_t @test_vld3q_s64(i64* readonly %a) {
+; CHECK: test_vld3q_s64
+; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %vld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3.v2i64(i8* %1, i32 8)
+  %vld3.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.int64x2x3_t undef, <2 x i64> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int64x2x3_t %.fca.0.0.insert, <2 x i64> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int64x2x3_t %.fca.0.1.insert, <2 x i64> %vld3.fca.2.extract, 0, 2
+  ret %struct.int64x2x3_t %.fca.0.2.insert
+}
+
+define %struct.float32x4x3_t @test_vld3q_f32(float* readonly %a) {
+; CHECK: test_vld3q_f32
+; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8* %1, i32 4)
+  %vld3.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.float32x4x3_t undef, <4 x float> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float32x4x3_t %.fca.0.0.insert, <4 x float> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.float32x4x3_t %.fca.0.1.insert, <4 x float> %vld3.fca.2.extract, 0, 2
+  ret %struct.float32x4x3_t %.fca.0.2.insert
+}
+
+define %struct.float64x2x3_t @test_vld3q_f64(double* readonly %a) {
+; CHECK: test_vld3q_f64
+; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %vld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3.v2f64(i8* %1, i32 8)
+  %vld3.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.float64x2x3_t undef, <2 x double> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float64x2x3_t %.fca.0.0.insert, <2 x double> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.float64x2x3_t %.fca.0.1.insert, <2 x double> %vld3.fca.2.extract, 0, 2
+  ret %struct.float64x2x3_t %.fca.0.2.insert
+}
+
+define %struct.int8x8x3_t @test_vld3_s8(i8* readonly %a) {
+; CHECK: test_vld3_s8
+; CHECK: ld3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+  %vld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8* %a, i32 1)
+  %vld3.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.int8x8x3_t undef, <8 x i8> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int8x8x3_t %.fca.0.0.insert, <8 x i8> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int8x8x3_t %.fca.0.1.insert, <8 x i8> %vld3.fca.2.extract, 0, 2
+  ret %struct.int8x8x3_t %.fca.0.2.insert
+}
+
+define %struct.int16x4x3_t @test_vld3_s16(i16* readonly %a) {
+; CHECK: test_vld3_s16
+; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %1, i32 2)
+  %vld3.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.int16x4x3_t undef, <4 x i16> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int16x4x3_t %.fca.0.0.insert, <4 x i16> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int16x4x3_t %.fca.0.1.insert, <4 x i16> %vld3.fca.2.extract, 0, 2
+  ret %struct.int16x4x3_t %.fca.0.2.insert
+}
+
+define %struct.int32x2x3_t @test_vld3_s32(i32* readonly %a) {
+; CHECK: test_vld3_s32
+; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %vld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32(i8* %1, i32 4)
+  %vld3.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.int32x2x3_t undef, <2 x i32> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int32x2x3_t %.fca.0.0.insert, <2 x i32> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int32x2x3_t %.fca.0.1.insert, <2 x i32> %vld3.fca.2.extract, 0, 2
+  ret %struct.int32x2x3_t %.fca.0.2.insert
+}
+
+define %struct.int64x1x3_t @test_vld3_s64(i64* readonly %a) {
+; CHECK: test_vld3_s64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %vld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8* %1, i32 8)
+  %vld3.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.int64x1x3_t undef, <1 x i64> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int64x1x3_t %.fca.0.0.insert, <1 x i64> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int64x1x3_t %.fca.0.1.insert, <1 x i64> %vld3.fca.2.extract, 0, 2
+  ret %struct.int64x1x3_t %.fca.0.2.insert
+}
+
+define %struct.float32x2x3_t @test_vld3_f32(float* readonly %a) {
+; CHECK: test_vld3_f32
+; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %vld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3.v2f32(i8* %1, i32 4)
+  %vld3.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.float32x2x3_t undef, <2 x float> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float32x2x3_t %.fca.0.0.insert, <2 x float> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.float32x2x3_t %.fca.0.1.insert, <2 x float> %vld3.fca.2.extract, 0, 2
+  ret %struct.float32x2x3_t %.fca.0.2.insert
+}
+
+define %struct.float64x1x3_t @test_vld3_f64(double* readonly %a) {
+; CHECK: test_vld3_f64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %vld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8* %1, i32 8)
+  %vld3.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 0
+  %vld3.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 1
+  %vld3.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 2
+  %.fca.0.0.insert = insertvalue %struct.float64x1x3_t undef, <1 x double> %vld3.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float64x1x3_t %.fca.0.0.insert, <1 x double> %vld3.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.float64x1x3_t %.fca.0.1.insert, <1 x double> %vld3.fca.2.extract, 0, 2
+  ret %struct.float64x1x3_t %.fca.0.2.insert
+}
+
+define %struct.int8x16x4_t @test_vld4q_s8(i8* readonly %a) {
+; CHECK: test_vld4q_s8
+; CHECK: ld4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+  %vld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8* %a, i32 1)
+  %vld4.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.int8x16x4_t undef, <16 x i8> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int8x16x4_t %.fca.0.0.insert, <16 x i8> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int8x16x4_t %.fca.0.1.insert, <16 x i8> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.int8x16x4_t %.fca.0.2.insert, <16 x i8> %vld4.fca.3.extract, 0, 3
+  ret %struct.int8x16x4_t %.fca.0.3.insert
+}
+
+define %struct.int16x8x4_t @test_vld4q_s16(i16* readonly %a) {
+; CHECK: test_vld4q_s16
+; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %1, i32 2)
+  %vld4.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.int16x8x4_t undef, <8 x i16> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int16x8x4_t %.fca.0.0.insert, <8 x i16> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int16x8x4_t %.fca.0.1.insert, <8 x i16> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.int16x8x4_t %.fca.0.2.insert, <8 x i16> %vld4.fca.3.extract, 0, 3
+  ret %struct.int16x8x4_t %.fca.0.3.insert
+}
+
+define %struct.int32x4x4_t @test_vld4q_s32(i32* readonly %a) {
+; CHECK: test_vld4q_s32
+; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %vld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32(i8* %1, i32 4)
+  %vld4.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.int32x4x4_t undef, <4 x i32> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int32x4x4_t %.fca.0.0.insert, <4 x i32> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int32x4x4_t %.fca.0.1.insert, <4 x i32> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.int32x4x4_t %.fca.0.2.insert, <4 x i32> %vld4.fca.3.extract, 0, 3
+  ret %struct.int32x4x4_t %.fca.0.3.insert
+}
+
+define %struct.int64x2x4_t @test_vld4q_s64(i64* readonly %a) {
+; CHECK: test_vld4q_s64
+; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %vld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4.v2i64(i8* %1, i32 8)
+  %vld4.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.int64x2x4_t undef, <2 x i64> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int64x2x4_t %.fca.0.0.insert, <2 x i64> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int64x2x4_t %.fca.0.1.insert, <2 x i64> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.int64x2x4_t %.fca.0.2.insert, <2 x i64> %vld4.fca.3.extract, 0, 3
+  ret %struct.int64x2x4_t %.fca.0.3.insert
+}
+
+define %struct.float32x4x4_t @test_vld4q_f32(float* readonly %a) {
+; CHECK: test_vld4q_f32
+; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %vld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8* %1, i32 4)
+  %vld4.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.float32x4x4_t undef, <4 x float> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float32x4x4_t %.fca.0.0.insert, <4 x float> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.float32x4x4_t %.fca.0.1.insert, <4 x float> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.float32x4x4_t %.fca.0.2.insert, <4 x float> %vld4.fca.3.extract, 0, 3
+  ret %struct.float32x4x4_t %.fca.0.3.insert
+}
+
+define %struct.float64x2x4_t @test_vld4q_f64(double* readonly %a) {
+; CHECK: test_vld4q_f64
+; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %vld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4.v2f64(i8* %1, i32 8)
+  %vld4.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.float64x2x4_t undef, <2 x double> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float64x2x4_t %.fca.0.0.insert, <2 x double> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.float64x2x4_t %.fca.0.1.insert, <2 x double> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.float64x2x4_t %.fca.0.2.insert, <2 x double> %vld4.fca.3.extract, 0, 3
+  ret %struct.float64x2x4_t %.fca.0.3.insert
+}
+
+define %struct.int8x8x4_t @test_vld4_s8(i8* readonly %a) {
+; CHECK: test_vld4_s8
+; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+  %vld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %a, i32 1)
+  %vld4.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.int8x8x4_t undef, <8 x i8> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int8x8x4_t %.fca.0.0.insert, <8 x i8> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int8x8x4_t %.fca.0.1.insert, <8 x i8> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.int8x8x4_t %.fca.0.2.insert, <8 x i8> %vld4.fca.3.extract, 0, 3
+  ret %struct.int8x8x4_t %.fca.0.3.insert
+}
+
+define %struct.int16x4x4_t @test_vld4_s16(i16* readonly %a) {
+; CHECK: test_vld4_s16
+; CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %vld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8* %1, i32 2)
+  %vld4.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.int16x4x4_t undef, <4 x i16> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int16x4x4_t %.fca.0.0.insert, <4 x i16> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int16x4x4_t %.fca.0.1.insert, <4 x i16> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.int16x4x4_t %.fca.0.2.insert, <4 x i16> %vld4.fca.3.extract, 0, 3
+  ret %struct.int16x4x4_t %.fca.0.3.insert
+}
+
+define %struct.int32x2x4_t @test_vld4_s32(i32* readonly %a) {
+; CHECK: test_vld4_s32
+; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %vld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32(i8* %1, i32 4)
+  %vld4.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.int32x2x4_t undef, <2 x i32> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int32x2x4_t %.fca.0.0.insert, <2 x i32> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int32x2x4_t %.fca.0.1.insert, <2 x i32> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.int32x2x4_t %.fca.0.2.insert, <2 x i32> %vld4.fca.3.extract, 0, 3
+  ret %struct.int32x2x4_t %.fca.0.3.insert
+}
+
+define %struct.int64x1x4_t @test_vld4_s64(i64* readonly %a) {
+; CHECK: test_vld4_s64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %vld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8* %1, i32 8)
+  %vld4.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.int64x1x4_t undef, <1 x i64> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int64x1x4_t %.fca.0.0.insert, <1 x i64> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int64x1x4_t %.fca.0.1.insert, <1 x i64> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.int64x1x4_t %.fca.0.2.insert, <1 x i64> %vld4.fca.3.extract, 0, 3
+  ret %struct.int64x1x4_t %.fca.0.3.insert
+}
+
+define %struct.float32x2x4_t @test_vld4_f32(float* readonly %a) {
+; CHECK: test_vld4_f32
+; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %vld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4.v2f32(i8* %1, i32 4)
+  %vld4.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.float32x2x4_t undef, <2 x float> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float32x2x4_t %.fca.0.0.insert, <2 x float> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.float32x2x4_t %.fca.0.1.insert, <2 x float> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.float32x2x4_t %.fca.0.2.insert, <2 x float> %vld4.fca.3.extract, 0, 3
+  ret %struct.float32x2x4_t %.fca.0.3.insert
+}
+
+define %struct.float64x1x4_t @test_vld4_f64(double* readonly %a) {
+; CHECK: test_vld4_f64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %vld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8* %1, i32 8)
+  %vld4.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 0
+  %vld4.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 1
+  %vld4.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 2
+  %vld4.fca.3.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 3
+  %.fca.0.0.insert = insertvalue %struct.float64x1x4_t undef, <1 x double> %vld4.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.float64x1x4_t %.fca.0.0.insert, <1 x double> %vld4.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.float64x1x4_t %.fca.0.1.insert, <1 x double> %vld4.fca.2.extract, 0, 2
+  %.fca.0.3.insert = insertvalue %struct.float64x1x4_t %.fca.0.2.insert, <1 x double> %vld4.fca.3.extract, 0, 3
+  ret %struct.float64x1x4_t %.fca.0.3.insert
+}
+
+declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32)
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32)
+declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32)
+declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32)
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32)
+declare <2 x double> @llvm.arm.neon.vld1.v2f64(i8*, i32)
+declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*, i32)
+declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32)
+declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32)
+declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32)
+declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*, i32)
+declare <1 x double> @llvm.arm.neon.vld1.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double> } @llvm.arm.neon.vld2.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8*, i32)
+
+define void @test_vst1q_s8(i8* %a, <16 x i8> %b) {
+; CHECK: test_vst1q_s8
+; CHECK: st1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+  tail call void @llvm.arm.neon.vst1.v16i8(i8* %a, <16 x i8> %b, i32 1)
+  ret void
+}
+
+define void @test_vst1q_s16(i16* %a, <8 x i16> %b) {
+; CHECK: test_vst1q_s16
+; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v8i16(i8* %1, <8 x i16> %b, i32 2)
+  ret void
+}
+
+define void @test_vst1q_s32(i32* %a, <4 x i32> %b) {
+; CHECK: test_vst1q_s32
+; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v4i32(i8* %1, <4 x i32> %b, i32 4)
+  ret void
+}
+
+define void @test_vst1q_s64(i64* %a, <2 x i64> %b) {
+; CHECK: test_vst1q_s64
+; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v2i64(i8* %1, <2 x i64> %b, i32 8)
+  ret void
+}
+
+define void @test_vst1q_f32(float* %a, <4 x float> %b) {
+; CHECK: test_vst1q_f32
+; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v4f32(i8* %1, <4 x float> %b, i32 4)
+  ret void
+}
+
+define void @test_vst1q_f64(double* %a, <2 x double> %b) {
+; CHECK: test_vst1q_f64
+; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v2f64(i8* %1, <2 x double> %b, i32 8)
+  ret void
+}
+
+define void @test_vst1_s8(i8* %a, <8 x i8> %b) {
+; CHECK: test_vst1_s8
+; CHECK: st1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+  tail call void @llvm.arm.neon.vst1.v8i8(i8* %a, <8 x i8> %b, i32 1)
+  ret void
+}
+
+define void @test_vst1_s16(i16* %a, <4 x i16> %b) {
+; CHECK: test_vst1_s16
+; CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v4i16(i8* %1, <4 x i16> %b, i32 2)
+  ret void
+}
+
+define void @test_vst1_s32(i32* %a, <2 x i32> %b) {
+; CHECK: test_vst1_s32
+; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v2i32(i8* %1, <2 x i32> %b, i32 4)
+  ret void
+}
+
+define void @test_vst1_s64(i64* %a, <1 x i64> %b) {
+; CHECK: test_vst1_s64
+; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v1i64(i8* %1, <1 x i64> %b, i32 8)
+  ret void
+}
+
+define void @test_vst1_f32(float* %a, <2 x float> %b) {
+; CHECK: test_vst1_f32
+; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v2f32(i8* %1, <2 x float> %b, i32 4)
+  ret void
+}
+
+define void @test_vst1_f64(double* %a, <1 x double> %b) {
+; CHECK: test_vst1_f64
+; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  tail call void @llvm.arm.neon.vst1.v1f64(i8* %1, <1 x double> %b, i32 8)
+  ret void
+}
+
+define void @test_vst2q_s8(i8* %a, [2 x <16 x i8>] %b.coerce) {
+; CHECK: test_vst2q_s8
+; CHECK: st2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %b.coerce, 1
+  tail call void @llvm.arm.neon.vst2.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, i32 1)
+  ret void
+}
+
+define void @test_vst2q_s16(i16* %a, [2 x <8 x i16>] %b.coerce) {
+; CHECK: test_vst2q_s16
+; CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1
+  %1 = bitcast i16* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, i32 2)
+  ret void
+}
+
+define void @test_vst2q_s32(i32* %a, [2 x <4 x i32>] %b.coerce) {
+; CHECK: test_vst2q_s32
+; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1
+  %1 = bitcast i32* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, i32 4)
+  ret void
+}
+
+define void @test_vst2q_s64(i64* %a, [2 x <2 x i64>] %b.coerce) {
+; CHECK: test_vst2q_s64
+; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1
+  %1 = bitcast i64* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, i32 8)
+  ret void
+}
+
+define void @test_vst2q_f32(float* %a, [2 x <4 x float>] %b.coerce) {
+; CHECK: test_vst2q_f32
+; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1
+  %1 = bitcast float* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, i32 4)
+  ret void
+}
+
+define void @test_vst2q_f64(double* %a, [2 x <2 x double>] %b.coerce) {
+; CHECK: test_vst2q_f64
+; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1
+  %1 = bitcast double* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, i32 8)
+  ret void
+}
+
+define void @test_vst2_s8(i8* %a, [2 x <8 x i8>] %b.coerce) {
+; CHECK: test_vst2_s8
+; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1
+  tail call void @llvm.arm.neon.vst2.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, i32 1)
+  ret void
+}
+
+define void @test_vst2_s16(i16* %a, [2 x <4 x i16>] %b.coerce) {
+; CHECK: test_vst2_s16
+; CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1
+  %1 = bitcast i16* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, i32 2)
+  ret void
+}
+
+define void @test_vst2_s32(i32* %a, [2 x <2 x i32>] %b.coerce) {
+; CHECK: test_vst2_s32
+; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1
+  %1 = bitcast i32* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, i32 4)
+  ret void
+}
+
+define void @test_vst2_s64(i64* %a, [2 x <1 x i64>] %b.coerce) {
+; CHECK: test_vst2_s64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1
+  %1 = bitcast i64* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, i32 8)
+  ret void
+}
+
+define void @test_vst2_f32(float* %a, [2 x <2 x float>] %b.coerce) {
+; CHECK: test_vst2_f32
+; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1
+  %1 = bitcast float* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, i32 4)
+  ret void
+}
+
+define void @test_vst2_f64(double* %a, [2 x <1 x double>] %b.coerce) {
+; CHECK: test_vst2_f64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1
+  %1 = bitcast double* %a to i8*
+  tail call void @llvm.arm.neon.vst2.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, i32 8)
+  ret void
+}
+
+define void @test_vst3q_s8(i8* %a, [3 x <16 x i8>] %b.coerce) {
+; CHECK: test_vst3q_s8
+; CHECK: st3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <16 x i8>] %b.coerce, 2
+  tail call void @llvm.arm.neon.vst3.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, i32 1)
+  ret void
+}
+
+define void @test_vst3q_s16(i16* %a, [3 x <8 x i16>] %b.coerce) {
+; CHECK: test_vst3q_s16
+; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <8 x i16>] %b.coerce, 2
+  %1 = bitcast i16* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, i32 2)
+  ret void
+}
+
+define void @test_vst3q_s32(i32* %a, [3 x <4 x i32>] %b.coerce) {
+; CHECK: test_vst3q_s32
+; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <4 x i32>] %b.coerce, 2
+  %1 = bitcast i32* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, i32 4)
+  ret void
+}
+
+define void @test_vst3q_s64(i64* %a, [3 x <2 x i64>] %b.coerce) {
+; CHECK: test_vst3q_s64
+; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <2 x i64>] %b.coerce, 2
+  %1 = bitcast i64* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, i32 8)
+  ret void
+}
+
+define void @test_vst3q_f32(float* %a, [3 x <4 x float>] %b.coerce) {
+; CHECK: test_vst3q_f32
+; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <4 x float>] %b.coerce, 2
+  %1 = bitcast float* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, i32 4)
+  ret void
+}
+
+define void @test_vst3q_f64(double* %a, [3 x <2 x double>] %b.coerce) {
+; CHECK: test_vst3q_f64
+; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <2 x double>] %b.coerce, 2
+  %1 = bitcast double* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, i32 8)
+  ret void
+}
+
+define void @test_vst3_s8(i8* %a, [3 x <8 x i8>] %b.coerce) {
+; CHECK: test_vst3_s8
+; CHECK: st3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <8 x i8>] %b.coerce, 2
+  tail call void @llvm.arm.neon.vst3.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, i32 1)
+  ret void
+}
+
+define void @test_vst3_s16(i16* %a, [3 x <4 x i16>] %b.coerce) {
+; CHECK: test_vst3_s16
+; CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <4 x i16>] %b.coerce, 2
+  %1 = bitcast i16* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, i32 2)
+  ret void
+}
+
+define void @test_vst3_s32(i32* %a, [3 x <2 x i32>] %b.coerce) {
+; CHECK: test_vst3_s32
+; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <2 x i32>] %b.coerce, 2
+  %1 = bitcast i32* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, i32 4)
+  ret void
+}
+
+define void @test_vst3_s64(i64* %a, [3 x <1 x i64>] %b.coerce) {
+; CHECK: test_vst3_s64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <1 x i64>] %b.coerce, 2
+  %1 = bitcast i64* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, i32 8)
+  ret void
+}
+
+define void @test_vst3_f32(float* %a, [3 x <2 x float>] %b.coerce) {
+; CHECK: test_vst3_f32
+; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <2 x float>] %b.coerce, 2
+  %1 = bitcast float* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, i32 4)
+  ret void
+}
+
+define void @test_vst3_f64(double* %a, [3 x <1 x double>] %b.coerce) {
+; CHECK: test_vst3_f64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [3 x <1 x double>] %b.coerce, 2
+  %1 = bitcast double* %a to i8*
+  tail call void @llvm.arm.neon.vst3.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, i32 8)
+  ret void
+}
+
+define void @test_vst4q_s8(i8* %a, [4 x <16 x i8>] %b.coerce) {
+; CHECK: test_vst4q_s8
+; CHECK: st4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [4 x <16 x i8>] %b.coerce, 2
+  %b.coerce.fca.3.extract = extractvalue [4 x <16 x i8>] %b.coerce, 3
+  tail call void @llvm.arm.neon.vst4.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, <16 x i8> %b.coerce.fca.3.extract, i32 1)
+  ret void
+}
+
+define void @test_vst4q_s16(i16* %a, [4 x <8 x i16>] %b.coerce) {
+; CHECK: test_vst4q_s16
+; CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [4 x <8 x i16>] %b.coerce, 2
+  %b.coerce.fca.3.extract = extractvalue [4 x <8 x i16>] %b.coerce, 3
+  %1 = bitcast i16* %a to i8*
+  tail call void @llvm.arm.neon.vst4.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, <8 x i16> %b.coerce.fca.3.extract, i32 2)
+  ret void
+}
+
+define void @test_vst4q_s32(i32* %a, [4 x <4 x i32>] %b.coerce) {
+; CHECK: test_vst4q_s32
+; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [4 x <4 x i32>] %b.coerce, 2
+  %b.coerce.fca.3.extract = extractvalue [4 x <4 x i32>] %b.coerce, 3
+  %1 = bitcast i32* %a to i8*
+  tail call void @llvm.arm.neon.vst4.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, <4 x i32> %b.coerce.fca.3.extract, i32 4)
+  ret void
+}
+
+define void @test_vst4q_s64(i64* %a, [4 x <2 x i64>] %b.coerce) {
+; CHECK: test_vst4q_s64
+; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [4 x <2 x i64>] %b.coerce, 2
+  %b.coerce.fca.3.extract = extractvalue [4 x <2 x i64>] %b.coerce, 3
+  %1 = bitcast i64* %a to i8*
+  tail call void @llvm.arm.neon.vst4.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, <2 x i64> %b.coerce.fca.3.extract, i32 8)
+  ret void
+}
+
+define void @test_vst4q_f32(float* %a, [4 x <4 x float>] %b.coerce) {
+; CHECK: test_vst4q_f32
+; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [4 x <4 x float>] %b.coerce, 2
+  %b.coerce.fca.3.extract = extractvalue [4 x <4 x float>] %b.coerce, 3
+  %1 = bitcast float* %a to i8*
+  tail call void @llvm.arm.neon.vst4.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, <4 x float> %b.coerce.fca.3.extract, i32 4)
+  ret void
+}
+
+define void @test_vst4q_f64(double* %a, [4 x <2 x double>] %b.coerce) {
+; CHECK: test_vst4q_f64
+; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [4 x <2 x double>] %b.coerce, 2
+  %b.coerce.fca.3.extract = extractvalue [4 x <2 x double>] %b.coerce, 3
+  %1 = bitcast double* %a to i8*
+  tail call void @llvm.arm.neon.vst4.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, <2 x double> %b.coerce.fca.3.extract, i32 8)
+  ret void
+}
+
+define void @test_vst4_s8(i8* %a, [4 x <8 x i8>] %b.coerce) {
+; CHECK: test_vst4_s8
+; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [4 x <8 x i8>] %b.coerce, 2
+  %b.coerce.fca.3.extract = extractvalue [4 x <8 x i8>] %b.coerce, 3
+  tail call void @llvm.arm.neon.vst4.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, <8 x i8> %b.coerce.fca.3.extract, i32 1)
+  ret void
+}
+
+define void @test_vst4_s16(i16* %a, [4 x <4 x i16>] %b.coerce) {
+; CHECK: test_vst4_s16
+; CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+  %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0
+  %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1
+  %b.coerce.fca.2.extract = extractvalue [4 x <4 x i16>] %b.coerce, 2
+  %b.coerce.fca.3.extract = extractvalue [4 x <4 x i16>] %b.coerce, 3
+  %1 = bitcast i16* %a to i8*
+  tail call void @llvm.arm.neon.vst4.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, <4 x i16> %b.coerce.fca.3.extract, i32 2)
+  ret void
+}
+
+define void @test_vst4_s32(i32* %a, [4 x <2 x i32>] %b.coerce) {
+; CHECK: test_vst4_s32
+; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s},
[{{x[0-9]+|sp}}] + %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0 + %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1 + %b.coerce.fca.2.extract = extractvalue [4 x <2 x i32>] %b.coerce, 2 + %b.coerce.fca.3.extract = extractvalue [4 x <2 x i32>] %b.coerce, 3 + %1 = bitcast i32* %a to i8* + tail call void @llvm.arm.neon.vst4.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, <2 x i32> %b.coerce.fca.3.extract, i32 4) + ret void +} + +define void @test_vst4_s64(i64* %a, [4 x <1 x i64>] %b.coerce) { +; CHECK: test_vst4_s64 +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] + %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0 + %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1 + %b.coerce.fca.2.extract = extractvalue [4 x <1 x i64>] %b.coerce, 2 + %b.coerce.fca.3.extract = extractvalue [4 x <1 x i64>] %b.coerce, 3 + %1 = bitcast i64* %a to i8* + tail call void @llvm.arm.neon.vst4.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, <1 x i64> %b.coerce.fca.3.extract, i32 8) + ret void +} + +define void @test_vst4_f32(float* %a, [4 x <2 x float>] %b.coerce) { +; CHECK: test_vst4_f32 +; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] + %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0 + %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1 + %b.coerce.fca.2.extract = extractvalue [4 x <2 x float>] %b.coerce, 2 + %b.coerce.fca.3.extract = extractvalue [4 x <2 x float>] %b.coerce, 3 + %1 = bitcast float* %a to i8* + tail call void @llvm.arm.neon.vst4.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, <2 x float> %b.coerce.fca.3.extract, i32 4) + ret void +} + +define void @test_vst4_f64(double* %a, [4 x <1 x double>] %b.coerce) { +; CHECK: test_vst4_f64 +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] + %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0 + %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1 + %b.coerce.fca.2.extract = extractvalue [4 x <1 x double>] %b.coerce, 2 + %b.coerce.fca.3.extract = extractvalue [4 x <1 x double>] %b.coerce, 3 + %1 = bitcast double* %a to i8* + tail call void @llvm.arm.neon.vst4.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, <1 x double> %b.coerce.fca.3.extract, i32 8) + ret void +} + +declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32) +declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) +declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32) +declare void @llvm.arm.neon.vst1.v2i64(i8*, <2 x i64>, i32) +declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) +declare void @llvm.arm.neon.vst1.v2f64(i8*, <2 x double>, i32) +declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) +declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>, i32) +declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>, i32) +declare void @llvm.arm.neon.vst1.v1i64(i8*, <1 x i64>, i32) +declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32) +declare void @llvm.arm.neon.vst1.v1f64(i8*, <1 x double>, i32) +declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>, i32) +declare void 
@llvm.arm.neon.vst2.v8i16(i8*, <8 x i16>, <8 x i16>, i32) +declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32) +declare void @llvm.arm.neon.vst2.v2i64(i8*, <2 x i64>, <2 x i64>, i32) +declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) +declare void @llvm.arm.neon.vst2.v2f64(i8*, <2 x double>, <2 x double>, i32) +declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32) +declare void @llvm.arm.neon.vst2.v4i16(i8*, <4 x i16>, <4 x i16>, i32) +declare void @llvm.arm.neon.vst2.v2i32(i8*, <2 x i32>, <2 x i32>, i32) +declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32) +declare void @llvm.arm.neon.vst2.v2f32(i8*, <2 x float>, <2 x float>, i32) +declare void @llvm.arm.neon.vst2.v1f64(i8*, <1 x double>, <1 x double>, i32) +declare void @llvm.arm.neon.vst3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32) +declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) +declare void @llvm.arm.neon.vst3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32) +declare void @llvm.arm.neon.vst3.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32) +declare void @llvm.arm.neon.vst3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32) +declare void @llvm.arm.neon.vst3.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32) +declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) +declare void @llvm.arm.neon.vst3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) +declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) +declare void @llvm.arm.neon.vst3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32) +declare void @llvm.arm.neon.vst3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) +declare void @llvm.arm.neon.vst3.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32) +declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32) +declare void @llvm.arm.neon.vst4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) +declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) +declare void @llvm.arm.neon.vst4.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32) +declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) +declare void @llvm.arm.neon.vst4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32) +declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) +declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) +declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) +declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32) +declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) +declare void @llvm.arm.neon.vst4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
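These IR tests appear to mirror the shape Clang emits for the arm_neon.h structured load/store intrinsics: the N-register value arrives as a first-class aggregate (%b.coerce), is split apart with extractvalue, and is passed to the matching @llvm.arm.neon.vst* intrinsic, whose trailing i32 operand is the alignment in bytes. As a rough C-level sketch of the kind of source that drives this lowering (hypothetical user code, not part of the commit):

#include <arm_neon.h>

/* vst2q_f32 stores two <4 x float> registers element-interleaved, the
   pattern the tests above expect to select as "st2 {vA.4s, vB.4s}, [xN]". */
void interleave_store(float *dst, float32x4_t even, float32x4_t odd) {
    float32x4x2_t pair = { { even, odd } };
    vst2q_f32(dst, pair);   /* memory layout: e0 o0 e1 o1 e2 o2 e3 o3 */
}

Note the <1 x i64>/<1 x double> cases above: since st2/st3/st4 have no .1d form, the CHECK lines expect a multi-register st1 instead.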
\ No newline at end of file diff --git a/test/MC/AArch64/neon-diagnostics.s b/test/MC/AArch64/neon-diagnostics.s index 9127ed8..086d487 100644 --- a/test/MC/AArch64/neon-diagnostics.s +++ b/test/MC/AArch64/neon-diagnostics.s @@ -3880,3 +3880,224 @@ // CHECK-ERROR: error: invalid operand for instruction // CHECK-ERROR: frsqrts d8, s22, d18 // CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Vector load/store multiple N-element structure (class SIMD lselem) +//---------------------------------------------------------------------- + ld1 {x3}, [x2] + ld1 {v4}, [x0] + ld1 {v32.16b}, [x0] + ld1 {v15.8h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld1 {x3}, [x2] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld1 {v4}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld1 {v32.16b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld1 {v15.8h}, [x32] +// CHECK-ERROR: ^ + + ld1 {v0.16b, v2.16b}, [x0] + ld1 {v0.8h, v1.8h, v2.8h, v3.8h, v4.8h}, [x0] + ld1 v0.8b, v1.8b}, [x0] + ld1 {v0.8h-v4.8h}, [x0] + ld1 {v1.8h-v1.8h}, [x0] + ld1 {v15.8h-v17.4h}, [x15] + ld1 {v0.8b-v2.8b, [x0] +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: ld1 {v0.16b, v2.16b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid number of vectors +// CHECK-ERROR: ld1 {v0.8h, v1.8h, v2.8h, v3.8h, v4.8h}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: '{' expected +// CHECK-ERROR: ld1 v0.8b, v1.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid number of vectors +// CHECK-ERROR: ld1 {v0.8h-v4.8h}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid number of vectors +// CHECK-ERROR: ld1 {v1.8h-v1.8h}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected the same vector layout +// CHECK-ERROR: ld1 {v15.8h-v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: '}' expected +// CHECK-ERROR: ld1 {v0.8b-v2.8b, [x0] +// CHECK-ERROR: ^ + + ld2 {v15.8h, v16.4h}, [x15] + ld2 {v0.8b, v2.8b}, [x0] + ld2 {v15.4h, v16.4h, v17.4h}, [x32] + ld2 {v15.8h-v16.4h}, [x15] + ld2 {v0.2d-v2.2d}, [x0] +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: ld2 {v15.8h, v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: ld2 {v0.8b, v2.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld2 {v15.4h, v16.4h, v17.4h}, [x32] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected the same vector layout +// CHECK-ERROR: ld2 {v15.8h-v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld2 {v0.2d-v2.2d}, [x0] +// CHECK-ERROR: ^ + + ld3 {v15.8h, v16.8h, v17.4h}, [x15] + ld3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0] + ld3 {v0.8b, v2.8b, v3.8b}, [x0] + ld3 {v15.8h-v17.4h}, [x15] + ld3 {v31.4s-v2.4s}, [sp] +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: ld3 {v15.8h, v16.8h, v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: ld3 {v0.8b, v2.8b, v3.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected the same vector layout +// CHECK-ERROR: ld3 {v15.8h-v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid 
operand for instruction +// CHECK-ERROR: ld3 {v31.4s-v2.4s}, [sp] +// CHECK-ERROR: ^ + + ld4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15] + ld4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0] + ld4 {v15.4h, v16.4h, v17.4h, v18.4h, v19.4h}, [x31] + ld4 {v15.8h-v18.4h}, [x15] + ld4 {v31.2s-v1.2s}, [x31] +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: ld4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: ld4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid number of vectors +// CHECK-ERROR: ld4 {v15.4h, v16.4h, v17.4h, v18.4h, v19.4h}, [x31] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected the same vector layout +// CHECK-ERROR: ld4 {v15.8h-v18.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld4 {v31.2s-v1.2s}, [x31] +// CHECK-ERROR: ^ + + st1 {x3}, [x2] + st1 {v4}, [x0] + st1 {v32.16b}, [x0] + st1 {v15.8h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st1 {x3}, [x2] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st1 {v4}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st1 {v32.16b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st1 {v15.8h}, [x32] +// CHECK-ERROR: ^ + + st1 {v0.16b, v2.16b}, [x0] + st1 {v0.8h, v1.8h, v2.8h, v3.8h, v4.8h}, [x0] + st1 v0.8b, v1.8b}, [x0] + st1 {v0.8h-v4.8h}, [x0] + st1 {v1.8h-v1.8h}, [x0] + st1 {v15.8h-v17.4h}, [x15] + st1 {v0.8b-v2.8b, [x0] +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: st1 {v0.16b, v2.16b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid number of vectors +// CHECK-ERROR: st1 {v0.8h, v1.8h, v2.8h, v3.8h, v4.8h}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: '{' expected +// CHECK-ERROR: st1 v0.8b, v1.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid number of vectors +// CHECK-ERROR: st1 {v0.8h-v4.8h}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid number of vectors +// CHECK-ERROR: st1 {v1.8h-v1.8h}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected the same vector layout +// CHECK-ERROR: st1 {v15.8h-v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: '}' expected +// CHECK-ERROR: st1 {v0.8b-v2.8b, [x0] +// CHECK-ERROR: ^ + + st2 {v15.8h, v16.4h}, [x15] + st2 {v0.8b, v2.8b}, [x0] + st2 {v15.4h, v16.4h, v17.4h}, [x30] + st2 {v15.8h-v16.4h}, [x15] + st2 {v0.2d-v2.2d}, [x0] +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: st2 {v15.8h, v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: st2 {v0.8b, v2.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st2 {v15.4h, v16.4h, v17.4h}, [x30] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected the same vector layout +// CHECK-ERROR: st2 {v15.8h-v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st2 {v0.2d-v2.2d}, [x0] +// CHECK-ERROR: ^ + + st3 {v15.8h, v16.8h, v17.4h}, [x15] + st3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0] + st3 {v0.8b, v2.8b, v3.8b}, [x0] + st3 {v15.8h-v17.4h}, [x15] + st3 {v31.4s-v2.4s}, [sp] +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: st3 {v15.8h, v16.8h, v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// 
CHECK-ERROR: st3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: st3 {v0.8b, v2.8b, v3.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected the same vector layout +// CHECK-ERROR: st3 {v15.8h-v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st3 {v31.4s-v2.4s}, [sp] +// CHECK-ERROR: ^ + + st4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15] + st4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0] + st4 {v15.4h, v16.4h, v17.4h, v18.4h, v19.4h}, [x31] + st4 {v15.8h-v18.4h}, [x15] + st4 {v31.2s-v1.2s}, [x31] +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: st4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid space between two vectors +// CHECK-ERROR: st4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid number of vectors +// CHECK-ERROR: st4 {v15.4h, v16.4h, v17.4h, v18.4h, v19.4h}, [x31] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected the same vector layout +// CHECK-ERROR: st4 {v15.8h-v18.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v31.2s-v1.2s}, [x31] +// CHECK-ERROR: ^ diff --git a/test/MC/AArch64/neon-simd-ldst-multi-elem.s b/test/MC/AArch64/neon-simd-ldst-multi-elem.s new file mode 100644 index 0000000..05fe4da --- /dev/null +++ b/test/MC/AArch64/neon-simd-ldst-multi-elem.s @@ -0,0 +1,463 @@ +// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s + +// Check that the assembler can handle the documented syntax for AArch64 + +//------------------------------------------------------------------------------ +// Store multiple 1-element structures from one register +//------------------------------------------------------------------------------ + st1 {v0.16b}, [x0] + st1 {v15.8h}, [x15] + st1 {v31.4s}, [sp] + st1 {v0.2d}, [x0] + st1 {v0.8b}, [x0] + st1 {v15.4h}, [x15] + st1 {v31.2s}, [sp] + st1 {v0.1d}, [x0] +// CHECK: st1 {v0.16b}, [x0] // encoding: [0x00,0x70,0x00,0x4c] +// CHECK: st1 {v15.8h}, [x15] // encoding: [0xef,0x75,0x00,0x4c] +// CHECK: st1 {v31.4s}, [sp] // encoding: [0xff,0x7b,0x00,0x4c] +// CHECK: st1 {v0.2d}, [x0] // encoding: [0x00,0x7c,0x00,0x4c] +// CHECK: st1 {v0.8b}, [x0] // encoding: [0x00,0x70,0x00,0x0c] +// CHECK: st1 {v15.4h}, [x15] // encoding: [0xef,0x75,0x00,0x0c] +// CHECK: st1 {v31.2s}, [sp] // encoding: [0xff,0x7b,0x00,0x0c] +// CHECK: st1 {v0.1d}, [x0] // encoding: [0x00,0x7c,0x00,0x0c] + +//------------------------------------------------------------------------------ +// Store multiple 1-element structures from two consecutive registers +//------------------------------------------------------------------------------ + st1 {v0.16b, v1.16b}, [x0] + st1 {v15.8h, v16.8h}, [x15] + st1 {v31.4s, v0.4s}, [sp] + st1 {v0.2d, v1.2d}, [x0] + st1 {v0.8b, v1.8b}, [x0] + st1 {v15.4h, v16.4h}, [x15] + st1 {v31.2s, v0.2s}, [sp] + st1 {v0.1d, v1.1d}, [x0] +// CHECK: st1 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xa0,0x00,0x4c] +// CHECK: st1 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xa5,0x00,0x4c] +// CHECK: st1 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xab,0x00,0x4c] +// CHECK: st1 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xac,0x00,0x4c] +// CHECK: st1 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xa0,0x00,0x0c] +// CHECK: st1 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xa5,0x00,0x0c] +// CHECK: st1 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xab,0x00,0x0c] +// 
CHECK: st1 {v0.1d, v1.1d}, [x0] // encoding: [0x00,0xac,0x00,0x0c] + + st1 {v0.16b-v1.16b}, [x0] + st1 {v15.8h-v16.8h}, [x15] + st1 {v31.4s-v0.4s}, [sp] + st1 {v0.2d-v1.2d}, [x0] + st1 {v0.8b-v1.8b}, [x0] + st1 {v15.4h-v16.4h}, [x15] + st1 {v31.2s-v0.2s}, [sp] + st1 {v0.1d-v1.1d}, [x0] +// CHECK: st1 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xa0,0x00,0x4c] +// CHECK: st1 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xa5,0x00,0x4c] +// CHECK: st1 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xab,0x00,0x4c] +// CHECK: st1 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xac,0x00,0x4c] +// CHECK: st1 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xa0,0x00,0x0c] +// CHECK: st1 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xa5,0x00,0x0c] +// CHECK: st1 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xab,0x00,0x0c] +// CHECK: st1 {v0.1d, v1.1d}, [x0] // encoding: [0x00,0xac,0x00,0x0c] + +//------------------------------------------------------------------------------ +// Store multiple 1-element structures from three consecutive registers +//------------------------------------------------------------------------------ + st1 {v0.16b, v1.16b, v2.16b}, [x0] + st1 {v15.8h, v16.8h, v17.8h}, [x15] + st1 {v31.4s, v0.4s, v1.4s}, [sp] + st1 {v0.2d, v1.2d, v2.2d}, [x0] + st1 {v0.8b, v1.8b, v2.8b}, [x0] + st1 {v15.4h, v16.4h, v17.4h}, [x15] + st1 {v31.2s, v0.2s, v1.2s}, [sp] + st1 {v0.1d, v1.1d, v2.1d}, [x0] +// CHECK: st1 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x60,0x00,0x4c] +// CHECK: st1 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x65,0x00,0x4c] +// CHECK: st1 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x6b,0x00,0x4c] +// CHECK: st1 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x6c,0x00,0x4c] +// CHECK: st1 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x60,0x00,0x0c] +// CHECK: st1 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x65,0x00,0x0c] +// CHECK: st1 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x6b,0x00,0x0c] +// CHECK: st1 {v0.1d, v1.1d, v2.1d}, [x0] // encoding: [0x00,0x6c,0x00,0x0c] + + st1 {v0.16b-v2.16b}, [x0] + st1 {v15.8h-v17.8h}, [x15] + st1 {v31.4s-v1.4s}, [sp] + st1 {v0.2d-v2.2d}, [x0] + st1 {v0.8b-v2.8b}, [x0] + st1 {v15.4h-v17.4h}, [x15] + st1 {v31.2s-v1.2s}, [sp] + st1 {v0.1d-v2.1d}, [x0] +// CHECK: st1 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x60,0x00,0x4c] +// CHECK: st1 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x65,0x00,0x4c] +// CHECK: st1 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x6b,0x00,0x4c] +// CHECK: st1 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x6c,0x00,0x4c] +// CHECK: st1 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x60,0x00,0x0c] +// CHECK: st1 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x65,0x00,0x0c] +// CHECK: st1 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x6b,0x00,0x0c] +// CHECK: st1 {v0.1d, v1.1d, v2.1d}, [x0] // encoding: [0x00,0x6c,0x00,0x0c] + +//------------------------------------------------------------------------------ +// Store multiple 1-element structures from four consecutive registers +//------------------------------------------------------------------------------ + st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] + st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] + st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] + st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] + st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] + st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] + st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] + st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] +// CHECK: st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x20,0x00,0x4c] 
+// CHECK: st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x25,0x00,0x4c] +// CHECK: st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x2b,0x00,0x4c] +// CHECK: st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x2c,0x00,0x4c] +// CHECK: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x20,0x00,0x0c] +// CHECK: st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x25,0x00,0x0c] +// CHECK: st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x2b,0x00,0x0c] +// CHECK: st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] // encoding: [0x00,0x2c,0x00,0x0c] + + st1 {v0.16b-v3.16b}, [x0] + st1 {v15.8h-v18.8h}, [x15] + st1 {v31.4s-v2.4s}, [sp] + st1 {v0.2d-v3.2d}, [x0] + st1 {v0.8b-v3.8b}, [x0] + st1 {v15.4h-v18.4h}, [x15] + st1 {v31.2s-v2.2s}, [sp] + st1 {v0.1d-v3.1d}, [x0] +// CHECK: st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x20,0x00,0x4c] +// CHECK: st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x25,0x00,0x4c] +// CHECK: st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x2b,0x00,0x4c] +// CHECK: st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x2c,0x00,0x4c] +// CHECK: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x20,0x00,0x0c] +// CHECK: st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x25,0x00,0x0c] +// CHECK: st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x2b,0x00,0x0c] +// CHECK: st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] // encoding: [0x00,0x2c,0x00,0x0c] + +//------------------------------------------------------------------------------ +// Store multiple 2-element structures from two consecutive registers +//------------------------------------------------------------------------------ + st2 {v0.16b, v1.16b}, [x0] + st2 {v15.8h, v16.8h}, [x15] + st2 {v31.4s, v0.4s}, [sp] + st2 {v0.2d, v1.2d}, [x0] + st2 {v0.8b, v1.8b}, [x0] + st2 {v15.4h, v16.4h}, [x15] + st2 {v31.2s, v0.2s}, [sp] +// CHECK: st2 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0x80,0x00,0x4c] +// CHECK: st2 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0x85,0x00,0x4c] +// CHECK: st2 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0x8b,0x00,0x4c] +// CHECK: st2 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0x8c,0x00,0x4c] +// CHECK: st2 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0x80,0x00,0x0c] +// CHECK: st2 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0x85,0x00,0x0c] +// CHECK: st2 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0x8b,0x00,0x0c] + + st2 {v0.16b-v1.16b}, [x0] + st2 {v15.8h-v16.8h}, [x15] + st2 {v31.4s-v0.4s}, [sp] + st2 {v0.2d-v1.2d}, [x0] + st2 {v0.8b-v1.8b}, [x0] + st2 {v15.4h-v16.4h}, [x15] + st2 {v31.2s-v0.2s}, [sp] +// CHECK: st2 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0x80,0x00,0x4c] +// CHECK: st2 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0x85,0x00,0x4c] +// CHECK: st2 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0x8b,0x00,0x4c] +// CHECK: st2 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0x8c,0x00,0x4c] +// CHECK: st2 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0x80,0x00,0x0c] +// CHECK: st2 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0x85,0x00,0x0c] +// CHECK: st2 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0x8b,0x00,0x0c] + +//------------------------------------------------------------------------------ +// Store multiple 3-element structures from three consecutive registers +//------------------------------------------------------------------------------ + st3 {v0.16b, v1.16b, v2.16b}, [x0] + st3 {v15.8h, v16.8h, v17.8h}, [x15] + st3 {v31.4s, v0.4s, v1.4s}, [sp] + st3 {v0.2d, v1.2d, v2.2d}, [x0] + 
st3 {v0.8b, v1.8b, v2.8b}, [x0] + st3 {v15.4h, v16.4h, v17.4h}, [x15] + st3 {v31.2s, v0.2s, v1.2s}, [sp] +// CHECK: st3 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x40,0x00,0x4c] +// CHECK: st3 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x45,0x00,0x4c] +// CHECK: st3 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x4b,0x00,0x4c] +// CHECK: st3 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x4c,0x00,0x4c] +// CHECK: st3 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x40,0x00,0x0c] +// CHECK: st3 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x45,0x00,0x0c] +// CHECK: st3 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x4b,0x00,0x0c] + + st3 {v0.16b-v2.16b}, [x0] + st3 {v15.8h-v17.8h}, [x15] + st3 {v31.4s-v1.4s}, [sp] + st3 {v0.2d-v2.2d}, [x0] + st3 {v0.8b-v2.8b}, [x0] + st3 {v15.4h-v17.4h}, [x15] + st3 {v31.2s-v1.2s}, [sp] +// CHECK: st3 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x40,0x00,0x4c] +// CHECK: st3 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x45,0x00,0x4c] +// CHECK: st3 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x4b,0x00,0x4c] +// CHECK: st3 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x4c,0x00,0x4c] +// CHECK: st3 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x40,0x00,0x0c] +// CHECK: st3 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x45,0x00,0x0c] +// CHECK: st3 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x4b,0x00,0x0c] + +//------------------------------------------------------------------------------ +// Store multiple 4-element structures from four consecutive registers +//------------------------------------------------------------------------------ + st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] + st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] + st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] + st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] + st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] + st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] + st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] +// CHECK: st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x00,0x00,0x4c] +// CHECK: st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x05,0x00,0x4c] +// CHECK: st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x0b,0x00,0x4c] +// CHECK: st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x0c,0x00,0x4c] +// CHECK: st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x00,0x00,0x0c] +// CHECK: st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x05,0x00,0x0c] +// CHECK: st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x0b,0x00,0x0c] + + st4 {v0.16b-v3.16b}, [x0] + st4 {v15.8h-v18.8h}, [x15] + st4 {v31.4s-v2.4s}, [sp] + st4 {v0.2d-v3.2d}, [x0] + st4 {v0.8b-v3.8b}, [x0] + st4 {v15.4h-v18.4h}, [x15] + st4 {v31.2s-v2.2s}, [sp] +// CHECK: st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x00,0x00,0x4c] +// CHECK: st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x05,0x00,0x4c] +// CHECK: st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x0b,0x00,0x4c] +// CHECK: st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x0c,0x00,0x4c] +// CHECK: st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x00,0x00,0x0c] +// CHECK: st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x05,0x00,0x0c] +// CHECK: st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x0b,0x00,0x0c] + +//------------------------------------------------------------------------------ +// Load multiple 1-element structures to one register 
+//------------------------------------------------------------------------------ + ld1 {v0.16b}, [x0] + ld1 {v15.8h}, [x15] + ld1 {v31.4s}, [sp] + ld1 {v0.2d}, [x0] + ld1 {v0.8b}, [x0] + ld1 {v15.4h}, [x15] + ld1 {v31.2s}, [sp] + ld1 {v0.1d}, [x0] +// CHECK: ld1 {v0.16b}, [x0] // encoding: [0x00,0x70,0x40,0x4c] +// CHECK: ld1 {v15.8h}, [x15] // encoding: [0xef,0x75,0x40,0x4c] +// CHECK: ld1 {v31.4s}, [sp] // encoding: [0xff,0x7b,0x40,0x4c] +// CHECK: ld1 {v0.2d}, [x0] // encoding: [0x00,0x7c,0x40,0x4c] +// CHECK: ld1 {v0.8b}, [x0] // encoding: [0x00,0x70,0x40,0x0c] +// CHECK: ld1 {v15.4h}, [x15] // encoding: [0xef,0x75,0x40,0x0c] +// CHECK: ld1 {v31.2s}, [sp] // encoding: [0xff,0x7b,0x40,0x0c] +// CHECK: ld1 {v0.1d}, [x0] // encoding: [0x00,0x7c,0x40,0x0c] + +//------------------------------------------------------------------------------ +// Load multiple 1-element structures to two consecutive registers +//------------------------------------------------------------------------------ + ld1 {v0.16b, v1.16b}, [x0] + ld1 {v15.8h, v16.8h}, [x15] + ld1 {v31.4s, v0.4s}, [sp] + ld1 {v0.2d, v1.2d}, [x0] + ld1 {v0.8b, v1.8b}, [x0] + ld1 {v15.4h, v16.4h}, [x15] + ld1 {v31.2s, v0.2s}, [sp] + ld1 {v0.1d, v1.1d}, [x0] +// CHECK: ld1 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xa0,0x40,0x4c] +// CHECK: ld1 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xa5,0x40,0x4c] +// CHECK: ld1 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xab,0x40,0x4c] +// CHECK: ld1 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xac,0x40,0x4c] +// CHECK: ld1 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xa0,0x40,0x0c] +// CHECK: ld1 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xa5,0x40,0x0c] +// CHECK: ld1 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xab,0x40,0x0c] +// CHECK: ld1 {v0.1d, v1.1d}, [x0] // encoding: [0x00,0xac,0x40,0x0c] + + ld1 {v0.16b-v1.16b}, [x0] + ld1 {v15.8h-v16.8h}, [x15] + ld1 {v31.4s-v0.4s}, [sp] + ld1 {v0.2d-v1.2d}, [x0] + ld1 {v0.8b-v1.8b}, [x0] + ld1 {v15.4h-v16.4h}, [x15] + ld1 {v31.2s-v0.2s}, [sp] + ld1 {v0.1d-v1.1d}, [x0] +// CHECK: ld1 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xa0,0x40,0x4c] +// CHECK: ld1 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xa5,0x40,0x4c] +// CHECK: ld1 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xab,0x40,0x4c] +// CHECK: ld1 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xac,0x40,0x4c] +// CHECK: ld1 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xa0,0x40,0x0c] +// CHECK: ld1 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xa5,0x40,0x0c] +// CHECK: ld1 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xab,0x40,0x0c] +// CHECK: ld1 {v0.1d, v1.1d}, [x0] // encoding: [0x00,0xac,0x40,0x0c] + +//------------------------------------------------------------------------------ +// Load multiple 1-element structures to three consecutive registers +//------------------------------------------------------------------------------ + ld1 {v0.16b, v1.16b, v2.16b}, [x0] + ld1 {v15.8h, v16.8h, v17.8h}, [x15] + ld1 {v31.4s, v0.4s, v1.4s}, [sp] + ld1 {v0.2d, v1.2d, v2.2d}, [x0] + ld1 {v0.8b, v1.8b, v2.8b}, [x0] + ld1 {v15.4h, v16.4h, v17.4h}, [x15] + ld1 {v31.2s, v0.2s, v1.2s}, [sp] + ld1 {v0.1d, v1.1d, v2.1d}, [x0] +// CHECK: ld1 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x60,0x40,0x4c] +// CHECK: ld1 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x65,0x40,0x4c] +// CHECK: ld1 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x6b,0x40,0x4c] +// CHECK: ld1 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x6c,0x40,0x4c] +// CHECK: ld1 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x60,0x40,0x0c] +// CHECK: ld1 {v15.4h, v16.4h, 
v17.4h}, [x15] // encoding: [0xef,0x65,0x40,0x0c] +// CHECK: ld1 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x6b,0x40,0x0c] +// CHECK: ld1 {v0.1d, v1.1d, v2.1d}, [x0] // encoding: [0x00,0x6c,0x40,0x0c] + + ld1 {v0.16b-v2.16b}, [x0] + ld1 {v15.8h-v17.8h}, [x15] + ld1 {v31.4s-v1.4s}, [sp] + ld1 {v0.2d-v2.2d}, [x0] + ld1 {v0.8b-v2.8b}, [x0] + ld1 {v15.4h-v17.4h}, [x15] + ld1 {v31.2s-v1.2s}, [sp] + ld1 {v0.1d-v2.1d}, [x0] +// CHECK: ld1 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x60,0x40,0x4c] +// CHECK: ld1 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x65,0x40,0x4c] +// CHECK: ld1 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x6b,0x40,0x4c] +// CHECK: ld1 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x6c,0x40,0x4c] +// CHECK: ld1 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x60,0x40,0x0c] +// CHECK: ld1 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x65,0x40,0x0c] +// CHECK: ld1 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x6b,0x40,0x0c] +// CHECK: ld1 {v0.1d, v1.1d, v2.1d}, [x0] // encoding: [0x00,0x6c,0x40,0x0c] + +//------------------------------------------------------------------------------ +// Load multiple 1-element structures to four consecutive registers +//------------------------------------------------------------------------------ + ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] + ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] + ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] + ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] + ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] + ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] + ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] + ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] +// CHECK: ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x20,0x40,0x4c] +// CHECK: ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x25,0x40,0x4c] +// CHECK: ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x2b,0x40,0x4c] +// CHECK: ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x2c,0x40,0x4c] +// CHECK: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x20,0x40,0x0c] +// CHECK: ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x25,0x40,0x0c] +// CHECK: ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x2b,0x40,0x0c] +// CHECK: ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] // encoding: [0x00,0x2c,0x40,0x0c] + + ld1 {v0.16b-v3.16b}, [x0] + ld1 {v15.8h-v18.8h}, [x15] + ld1 {v31.4s-v2.4s}, [sp] + ld1 {v0.2d-v3.2d}, [x0] + ld1 {v0.8b-v3.8b}, [x0] + ld1 {v15.4h-v18.4h}, [x15] + ld1 {v31.2s-v2.2s}, [sp] + ld1 {v0.1d-v3.1d}, [x0] +// CHECK: ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x20,0x40,0x4c] +// CHECK: ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x25,0x40,0x4c] +// CHECK: ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x2b,0x40,0x4c] +// CHECK: ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x2c,0x40,0x4c] +// CHECK: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x20,0x40,0x0c] +// CHECK: ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x25,0x40,0x0c] +// CHECK: ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x2b,0x40,0x0c] +// CHECK: ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] // encoding: [0x00,0x2c,0x40,0x0c] + +//------------------------------------------------------------------------------ +// Load multiple 2-element structures to two consecutive registers +//------------------------------------------------------------------------------ + ld2 {v0.16b, v1.16b}, [x0] + ld2 {v15.8h, v16.8h}, [x15] + ld2
{v31.4s, v0.4s}, [sp] + ld2 {v0.2d, v1.2d}, [x0] + ld2 {v0.8b, v1.8b}, [x0] + ld2 {v15.4h, v16.4h}, [x15] + ld2 {v31.2s, v0.2s}, [sp] +// CHECK: ld2 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0x80,0x40,0x4c] +// CHECK: ld2 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0x85,0x40,0x4c] +// CHECK: ld2 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0x8b,0x40,0x4c] +// CHECK: ld2 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0x8c,0x40,0x4c] +// CHECK: ld2 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0x80,0x40,0x0c] +// CHECK: ld2 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0x85,0x40,0x0c] +// CHECK: ld2 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0x8b,0x40,0x0c] + + ld2 {v0.16b-v1.16b}, [x0] + ld2 {v15.8h-v16.8h}, [x15] + ld2 {v31.4s-v0.4s}, [sp] + ld2 {v0.2d-v1.2d}, [x0] + ld2 {v0.8b-v1.8b}, [x0] + ld2 {v15.4h-v16.4h}, [x15] + ld2 {v31.2s-v0.2s}, [sp] +// CHECK: ld2 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0x80,0x40,0x4c] +// CHECK: ld2 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0x85,0x40,0x4c] +// CHECK: ld2 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0x8b,0x40,0x4c] +// CHECK: ld2 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0x8c,0x40,0x4c] +// CHECK: ld2 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0x80,0x40,0x0c] +// CHECK: ld2 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0x85,0x40,0x0c] +// CHECK: ld2 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0x8b,0x40,0x0c] + +//------------------------------------------------------------------------------ +// Load multiple 3-element structures to three consecutive registers +//------------------------------------------------------------------------------ + ld3 {v0.16b, v1.16b, v2.16b}, [x0] + ld3 {v15.8h, v16.8h, v17.8h}, [x15] + ld3 {v31.4s, v0.4s, v1.4s}, [sp] + ld3 {v0.2d, v1.2d, v2.2d}, [x0] + ld3 {v0.8b, v1.8b, v2.8b}, [x0] + ld3 {v15.4h, v16.4h, v17.4h}, [x15] + ld3 {v31.2s, v0.2s, v1.2s}, [sp] +// CHECK: ld3 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x40,0x40,0x4c] +// CHECK: ld3 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x45,0x40,0x4c] +// CHECK: ld3 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x4b,0x40,0x4c] +// CHECK: ld3 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x4c,0x40,0x4c] +// CHECK: ld3 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x40,0x40,0x0c] +// CHECK: ld3 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x45,0x40,0x0c] +// CHECK: ld3 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x4b,0x40,0x0c] + + ld3 {v0.16b-v2.16b}, [x0] + ld3 {v15.8h-v17.8h}, [x15] + ld3 {v31.4s-v1.4s}, [sp] + ld3 {v0.2d-v2.2d}, [x0] + ld3 {v0.8b-v2.8b}, [x0] + ld3 {v15.4h-v17.4h}, [x15] + ld3 {v31.2s-v1.2s}, [sp] +// CHECK: ld3 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x40,0x40,0x4c] +// CHECK: ld3 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x45,0x40,0x4c] +// CHECK: ld3 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x4b,0x40,0x4c] +// CHECK: ld3 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x4c,0x40,0x4c] +// CHECK: ld3 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x40,0x40,0x0c] +// CHECK: ld3 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x45,0x40,0x0c] +// CHECK: ld3 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x4b,0x40,0x0c] + +//------------------------------------------------------------------------------ +// Load multiple 4-element structures to four consecutive registers +//------------------------------------------------------------------------------ + ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] + ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] + ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] + ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] + 
ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] + ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] + ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] +// CHECK: ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x00,0x40,0x4c] +// CHECK: ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x05,0x40,0x4c] +// CHECK: ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x0b,0x40,0x4c] +// CHECK: ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x0c,0x40,0x4c] +// CHECK: ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x00,0x40,0x0c] +// CHECK: ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x05,0x40,0x0c] +// CHECK: ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x0b,0x40,0x0c] + + ld4 {v0.16b-v3.16b}, [x0] + ld4 {v15.8h-v18.8h}, [x15] + ld4 {v31.4s-v2.4s}, [sp] + ld4 {v0.2d-v3.2d}, [x0] + ld4 {v0.8b-v3.8b}, [x0] + ld4 {v15.4h-v18.4h}, [x15] + ld4 {v31.2s-v2.2s}, [sp] +// CHECK: ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x00,0x40,0x4c] +// CHECK: ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x05,0x40,0x4c] +// CHECK: ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x0b,0x40,0x4c] +// CHECK: ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x0c,0x40,0x4c] +// CHECK: ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x00,0x40,0x0c] +// CHECK: ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x05,0x40,0x0c] +// CHECK: ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x0b,0x40,0x0c]
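One detail the expected encodings make easy to verify: each ld* word differs from its st* counterpart only in bit 22 (third byte 0x40 vs 0x00), the L bit of the AdvSIMD "load/store multiple structures" format, while bit 30 (0x4c vs 0x0c in the top byte) is the Q bit selecting the 128-bit register forms. A small self-contained sketch that reconstructs two of the words checked above (the helper name and field breakdown are illustrative, not part of the commit):

#include <assert.h>
#include <stdint.h>

/* AdvSIMD load/store multiple structures (no post-index):
   0 Q 0011000 L 000000 opcode(15:12) size(11:10) Rn(9:5) Rt(4:0) */
static uint32_t simd_ldst_mult(uint32_t q, uint32_t l, uint32_t opcode,
                               uint32_t size, uint32_t rn, uint32_t rt) {
    return (q << 30) | (0x3u << 26) | (l << 22) |
           (opcode << 12) | (size << 10) | (rn << 5) | rt;
}

int main(void) {
    /* st1 {v0.16b}, [x0] -> bytes [0x00,0x70,0x00,0x4c] above, i.e. 0x4c007000 */
    assert(simd_ldst_mult(1, 0, 0x7, 0, 0, 0) == 0x4c007000u);
    /* ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] -> [0xff,0x0b,0x40,0x0c] */
    assert(simd_ldst_mult(0, 1, 0x0, 2, 31, 31) == 0x0c400bffu);
    return 0;
}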