author     Hao Liu <Hao.Liu@arm.com>                          2013-11-18 06:31:53 +0000
committer  Hao Liu <Hao.Liu@arm.com>                          2013-11-18 06:31:53 +0000
commit     97577757c6dc84233ad10cd432664257e593e76d (patch)
tree       c7e2e4626673033af48768271210286531c32d41 /test/CodeGen/AArch64
parent     65c102e7fb68f78117be665dd9d4a73ef7e9f795 (diff)
Implement the newly added ACLE functions for ld1/st1 with 2/3/4 vectors.
The functions have names of the form vst1_s8_x2 ...
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194990 91177308-0d34-0410-b5e6-96231b3b80d8
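For context, a minimal sketch of how these ACLE functions are used from C, assuming a toolchain whose <arm_neon.h> provides the _x2/_x3/_x4 forms; copy32 is an illustrative name, not part of this patch:

#include <arm_neon.h>

/* Round-trips 32 bytes through the two-vector forms. With this patch the
   load is expected to select a single ld1 with a two-register list
   (ld1 {vA.16b, vB.16b}, [x0]) and the store a single st1 with the same
   arrangement -- the instructions the CHECK lines in the tests below match. */
void copy32(int8_t *dst, const int8_t *src) {
    int8x16x2_t v = vld1q_s8_x2(src); /* one ld1, two consecutive registers */
    vst1q_s8_x2(dst, v);              /* one st1, two consecutive registers */
}

The _x3 and _x4 variants work the same way with int8x16x3_t and int8x16x4_t values, which is why the new tests check ld1/st1 with three- and four-register lists.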
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll       1212
-rw-r--r--  test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll   158
2 files changed, 1269 insertions(+), 101 deletions(-)
diff --git a/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll b/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll index 4cd76bc..d0e7fc1 100644 --- a/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll +++ b/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll @@ -39,14 +39,14 @@ define <16 x i8> @test_vld1q_s8(i8* readonly %a) { -; CHECK: test_vld1q_s8 +; CHECK-LABEL: test_vld1q_s8 ; CHECK: ld1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}] %vld1 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %a, i32 1) ret <16 x i8> %vld1 } define <8 x i16> @test_vld1q_s16(i16* readonly %a) { -; CHECK: test_vld1q_s16 +; CHECK-LABEL: test_vld1q_s16 ; CHECK: ld1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to i8* %vld1 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %1, i32 2) @@ -54,7 +54,7 @@ define <8 x i16> @test_vld1q_s16(i16* readonly %a) { } define <4 x i32> @test_vld1q_s32(i32* readonly %a) { -; CHECK: test_vld1q_s32 +; CHECK-LABEL: test_vld1q_s32 ; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}] %1 = bitcast i32* %a to i8* %vld1 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %1, i32 4) @@ -62,7 +62,7 @@ define <4 x i32> @test_vld1q_s32(i32* readonly %a) { } define <2 x i64> @test_vld1q_s64(i64* readonly %a) { -; CHECK: test_vld1q_s64 +; CHECK-LABEL: test_vld1q_s64 ; CHECK: ld1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}] %1 = bitcast i64* %a to i8* %vld1 = tail call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %1, i32 8) @@ -70,7 +70,7 @@ define <2 x i64> @test_vld1q_s64(i64* readonly %a) { } define <4 x float> @test_vld1q_f32(float* readonly %a) { -; CHECK: test_vld1q_f32 +; CHECK-LABEL: test_vld1q_f32 ; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}] %1 = bitcast float* %a to i8* %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %1, i32 4) @@ -78,7 +78,7 @@ define <4 x float> @test_vld1q_f32(float* readonly %a) { } define <2 x double> @test_vld1q_f64(double* readonly %a) { -; CHECK: test_vld1q_f64 +; CHECK-LABEL: test_vld1q_f64 ; CHECK: ld1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %1 = bitcast double* %a to i8* %vld1 = tail call <2 x double> @llvm.arm.neon.vld1.v2f64(i8* %1, i32 8) @@ -86,14 +86,14 @@ define <2 x double> @test_vld1q_f64(double* readonly %a) { } define <8 x i8> @test_vld1_s8(i8* readonly %a) { -; CHECK: test_vld1_s8 +; CHECK-LABEL: test_vld1_s8 ; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}] %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1) ret <8 x i8> %vld1 } define <4 x i16> @test_vld1_s16(i16* readonly %a) { -; CHECK: test_vld1_s16 +; CHECK-LABEL: test_vld1_s16 ; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to i8* %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2) @@ -101,7 +101,7 @@ define <4 x i16> @test_vld1_s16(i16* readonly %a) { } define <2 x i32> @test_vld1_s32(i32* readonly %a) { -; CHECK: test_vld1_s32 +; CHECK-LABEL: test_vld1_s32 ; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}] %1 = bitcast i32* %a to i8* %vld1 = tail call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %1, i32 4) @@ -109,7 +109,7 @@ define <2 x i32> @test_vld1_s32(i32* readonly %a) { } define <1 x i64> @test_vld1_s64(i64* readonly %a) { -; CHECK: test_vld1_s64 +; CHECK-LABEL: test_vld1_s64 ; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}] %1 = bitcast i64* %a to i8* %vld1 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %1, i32 8) @@ -117,7 +117,7 @@ define <1 x i64> @test_vld1_s64(i64* readonly %a) { } define <2 x float> @test_vld1_f32(float* readonly %a) { -; CHECK: test_vld1_f32 +; CHECK-LABEL: test_vld1_f32 ; CHECK: ld1 {v{{[0-9]+}}.2s}, 
[x{{[0-9]+|sp}}] %1 = bitcast float* %a to i8* %vld1 = tail call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %1, i32 4) @@ -125,7 +125,7 @@ define <2 x float> @test_vld1_f32(float* readonly %a) { } define <1 x double> @test_vld1_f64(double* readonly %a) { -; CHECK: test_vld1_f64 +; CHECK-LABEL: test_vld1_f64 ; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}] %1 = bitcast double* %a to i8* %vld1 = tail call <1 x double> @llvm.arm.neon.vld1.v1f64(i8* %1, i32 8) @@ -133,14 +133,14 @@ define <1 x double> @test_vld1_f64(double* readonly %a) { } define <8 x i8> @test_vld1_p8(i8* readonly %a) { -; CHECK: test_vld1_p8 +; CHECK-LABEL: test_vld1_p8 ; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}] %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1) ret <8 x i8> %vld1 } define <4 x i16> @test_vld1_p16(i16* readonly %a) { -; CHECK: test_vld1_p16 +; CHECK-LABEL: test_vld1_p16 ; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to i8* %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2) @@ -148,7 +148,7 @@ define <4 x i16> @test_vld1_p16(i16* readonly %a) { } define %struct.int8x16x2_t @test_vld2q_s8(i8* readonly %a) { -; CHECK: test_vld2q_s8 +; CHECK-LABEL: test_vld2q_s8 ; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}] %vld2 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %a, i32 1) %vld2.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 0 @@ -159,7 +159,7 @@ define %struct.int8x16x2_t @test_vld2q_s8(i8* readonly %a) { } define %struct.int16x8x2_t @test_vld2q_s16(i16* readonly %a) { -; CHECK: test_vld2q_s16 +; CHECK-LABEL: test_vld2q_s16 ; CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to i8* %vld2 = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2.v8i16(i8* %1, i32 2) @@ -171,7 +171,7 @@ define %struct.int16x8x2_t @test_vld2q_s16(i16* readonly %a) { } define %struct.int32x4x2_t @test_vld2q_s32(i32* readonly %a) { -; CHECK: test_vld2q_s32 +; CHECK-LABEL: test_vld2q_s32 ; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}] %1 = bitcast i32* %a to i8* %vld2 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8* %1, i32 4) @@ -183,7 +183,7 @@ define %struct.int32x4x2_t @test_vld2q_s32(i32* readonly %a) { } define %struct.int64x2x2_t @test_vld2q_s64(i64* readonly %a) { -; CHECK: test_vld2q_s64 +; CHECK-LABEL: test_vld2q_s64 ; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}] %1 = bitcast i64* %a to i8* %vld2 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2.v2i64(i8* %1, i32 8) @@ -195,7 +195,7 @@ define %struct.int64x2x2_t @test_vld2q_s64(i64* readonly %a) { } define %struct.float32x4x2_t @test_vld2q_f32(float* readonly %a) { -; CHECK: test_vld2q_f32 +; CHECK-LABEL: test_vld2q_f32 ; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}] %1 = bitcast float* %a to i8* %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4) @@ -207,7 +207,7 @@ define %struct.float32x4x2_t @test_vld2q_f32(float* readonly %a) { } define %struct.float64x2x2_t @test_vld2q_f64(double* readonly %a) { -; CHECK: test_vld2q_f64 +; CHECK-LABEL: test_vld2q_f64 ; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}] %1 = bitcast double* %a to i8* %vld2 = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2.v2f64(i8* %1, i32 8) @@ -219,7 +219,7 @@ define %struct.float64x2x2_t @test_vld2q_f64(double* readonly %a) { } define %struct.int8x8x2_t @test_vld2_s8(i8* readonly %a) { -; CHECK: test_vld2_s8 +; CHECK-LABEL: 
test_vld2_s8 ; CHECK: ld2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}] %vld2 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8* %a, i32 1) %vld2.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 0 @@ -230,7 +230,7 @@ define %struct.int8x8x2_t @test_vld2_s8(i8* readonly %a) { } define %struct.int16x4x2_t @test_vld2_s16(i16* readonly %a) { -; CHECK: test_vld2_s16 +; CHECK-LABEL: test_vld2_s16 ; CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to i8* %vld2 = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2.v4i16(i8* %1, i32 2) @@ -242,7 +242,7 @@ define %struct.int16x4x2_t @test_vld2_s16(i16* readonly %a) { } define %struct.int32x2x2_t @test_vld2_s32(i32* readonly %a) { -; CHECK: test_vld2_s32 +; CHECK-LABEL: test_vld2_s32 ; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}] %1 = bitcast i32* %a to i8* %vld2 = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8* %1, i32 4) @@ -254,7 +254,7 @@ define %struct.int32x2x2_t @test_vld2_s32(i32* readonly %a) { } define %struct.int64x1x2_t @test_vld2_s64(i64* readonly %a) { -; CHECK: test_vld2_s64 +; CHECK-LABEL: test_vld2_s64 ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}] %1 = bitcast i64* %a to i8* %vld2 = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %1, i32 8) @@ -266,7 +266,7 @@ define %struct.int64x1x2_t @test_vld2_s64(i64* readonly %a) { } define %struct.float32x2x2_t @test_vld2_f32(float* readonly %a) { -; CHECK: test_vld2_f32 +; CHECK-LABEL: test_vld2_f32 ; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}] %1 = bitcast float* %a to i8* %vld2 = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %1, i32 4) @@ -278,7 +278,7 @@ define %struct.float32x2x2_t @test_vld2_f32(float* readonly %a) { } define %struct.float64x1x2_t @test_vld2_f64(double* readonly %a) { -; CHECK: test_vld2_f64 +; CHECK-LABEL: test_vld2_f64 ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}] %1 = bitcast double* %a to i8* %vld2 = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8* %1, i32 8) @@ -290,7 +290,7 @@ define %struct.float64x1x2_t @test_vld2_f64(double* readonly %a) { } define %struct.int8x16x3_t @test_vld3q_s8(i8* readonly %a) { -; CHECK: test_vld3q_s8 +; CHECK-LABEL: test_vld3q_s8 ; CHECK: ld3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}] %vld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %a, i32 1) %vld3.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 0 @@ -303,7 +303,7 @@ define %struct.int8x16x3_t @test_vld3q_s8(i8* readonly %a) { } define %struct.int16x8x3_t @test_vld3q_s16(i16* readonly %a) { -; CHECK: test_vld3q_s16 +; CHECK-LABEL: test_vld3q_s16 ; CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to i8* %vld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3.v8i16(i8* %1, i32 2) @@ -317,7 +317,7 @@ define %struct.int16x8x3_t @test_vld3q_s16(i16* readonly %a) { } define %struct.int32x4x3_t @test_vld3q_s32(i32* readonly %a) { -; CHECK: test_vld3q_s32 +; CHECK-LABEL: test_vld3q_s32 ; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}] %1 = bitcast i32* %a to i8* %vld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %1, i32 4) @@ -331,7 +331,7 @@ define %struct.int32x4x3_t @test_vld3q_s32(i32* readonly %a) { } define %struct.int64x2x3_t @test_vld3q_s64(i64* readonly %a) { -; 
CHECK: test_vld3q_s64 +; CHECK-LABEL: test_vld3q_s64 ; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}] %1 = bitcast i64* %a to i8* %vld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3.v2i64(i8* %1, i32 8) @@ -345,7 +345,7 @@ define %struct.int64x2x3_t @test_vld3q_s64(i64* readonly %a) { } define %struct.float32x4x3_t @test_vld3q_f32(float* readonly %a) { -; CHECK: test_vld3q_f32 +; CHECK-LABEL: test_vld3q_f32 ; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}] %1 = bitcast float* %a to i8* %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8* %1, i32 4) @@ -359,7 +359,7 @@ define %struct.float32x4x3_t @test_vld3q_f32(float* readonly %a) { } define %struct.float64x2x3_t @test_vld3q_f64(double* readonly %a) { -; CHECK: test_vld3q_f64 +; CHECK-LABEL: test_vld3q_f64 ; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}] %1 = bitcast double* %a to i8* %vld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3.v2f64(i8* %1, i32 8) @@ -373,7 +373,7 @@ define %struct.float64x2x3_t @test_vld3q_f64(double* readonly %a) { } define %struct.int8x8x3_t @test_vld3_s8(i8* readonly %a) { -; CHECK: test_vld3_s8 +; CHECK-LABEL: test_vld3_s8 ; CHECK: ld3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}] %vld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8* %a, i32 1) %vld3.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 0 @@ -386,7 +386,7 @@ define %struct.int8x8x3_t @test_vld3_s8(i8* readonly %a) { } define %struct.int16x4x3_t @test_vld3_s16(i16* readonly %a) { -; CHECK: test_vld3_s16 +; CHECK-LABEL: test_vld3_s16 ; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to i8* %vld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %1, i32 2) @@ -400,7 +400,7 @@ define %struct.int16x4x3_t @test_vld3_s16(i16* readonly %a) { } define %struct.int32x2x3_t @test_vld3_s32(i32* readonly %a) { -; CHECK: test_vld3_s32 +; CHECK-LABEL: test_vld3_s32 ; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}] %1 = bitcast i32* %a to i8* %vld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32(i8* %1, i32 4) @@ -414,7 +414,7 @@ define %struct.int32x2x3_t @test_vld3_s32(i32* readonly %a) { } define %struct.int64x1x3_t @test_vld3_s64(i64* readonly %a) { -; CHECK: test_vld3_s64 +; CHECK-LABEL: test_vld3_s64 ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}] %1 = bitcast i64* %a to i8* %vld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8* %1, i32 8) @@ -428,7 +428,7 @@ define %struct.int64x1x3_t @test_vld3_s64(i64* readonly %a) { } define %struct.float32x2x3_t @test_vld3_f32(float* readonly %a) { -; CHECK: test_vld3_f32 +; CHECK-LABEL: test_vld3_f32 ; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}] %1 = bitcast float* %a to i8* %vld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3.v2f32(i8* %1, i32 4) @@ -442,7 +442,7 @@ define %struct.float32x2x3_t @test_vld3_f32(float* readonly %a) { } define %struct.float64x1x3_t @test_vld3_f64(double* readonly %a) { -; CHECK: test_vld3_f64 +; CHECK-LABEL: test_vld3_f64 ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}] %1 = bitcast double* %a to i8* %vld3 = tail call { <1 x double>, <1 x double>, <1 x double> 
} @llvm.arm.neon.vld3.v1f64(i8* %1, i32 8) @@ -456,7 +456,7 @@ define %struct.float64x1x3_t @test_vld3_f64(double* readonly %a) { } define %struct.int8x16x4_t @test_vld4q_s8(i8* readonly %a) { -; CHECK: test_vld4q_s8 +; CHECK-LABEL: test_vld4q_s8 ; CHECK: ld4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}] %vld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8* %a, i32 1) %vld4.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 0 @@ -471,7 +471,7 @@ define %struct.int8x16x4_t @test_vld4q_s8(i8* readonly %a) { } define %struct.int16x8x4_t @test_vld4q_s16(i16* readonly %a) { -; CHECK: test_vld4q_s16 +; CHECK-LABEL: test_vld4q_s16 ; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to i8* %vld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %1, i32 2) @@ -487,7 +487,7 @@ define %struct.int16x8x4_t @test_vld4q_s16(i16* readonly %a) { } define %struct.int32x4x4_t @test_vld4q_s32(i32* readonly %a) { -; CHECK: test_vld4q_s32 +; CHECK-LABEL: test_vld4q_s32 ; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}] %1 = bitcast i32* %a to i8* %vld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32(i8* %1, i32 4) @@ -503,7 +503,7 @@ define %struct.int32x4x4_t @test_vld4q_s32(i32* readonly %a) { } define %struct.int64x2x4_t @test_vld4q_s64(i64* readonly %a) { -; CHECK: test_vld4q_s64 +; CHECK-LABEL: test_vld4q_s64 ; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}] %1 = bitcast i64* %a to i8* %vld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4.v2i64(i8* %1, i32 8) @@ -519,7 +519,7 @@ define %struct.int64x2x4_t @test_vld4q_s64(i64* readonly %a) { } define %struct.float32x4x4_t @test_vld4q_f32(float* readonly %a) { -; CHECK: test_vld4q_f32 +; CHECK-LABEL: test_vld4q_f32 ; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}] %1 = bitcast float* %a to i8* %vld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8* %1, i32 4) @@ -535,7 +535,7 @@ define %struct.float32x4x4_t @test_vld4q_f32(float* readonly %a) { } define %struct.float64x2x4_t @test_vld4q_f64(double* readonly %a) { -; CHECK: test_vld4q_f64 +; CHECK-LABEL: test_vld4q_f64 ; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}] %1 = bitcast double* %a to i8* %vld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4.v2f64(i8* %1, i32 8) @@ -551,7 +551,7 @@ define %struct.float64x2x4_t @test_vld4q_f64(double* readonly %a) { } define %struct.int8x8x4_t @test_vld4_s8(i8* readonly %a) { -; CHECK: test_vld4_s8 +; CHECK-LABEL: test_vld4_s8 ; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}] %vld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %a, i32 1) %vld4.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 0 @@ -566,7 +566,7 @@ define %struct.int8x8x4_t @test_vld4_s8(i8* readonly %a) { } define %struct.int16x4x4_t @test_vld4_s16(i16* readonly %a) { -; CHECK: test_vld4_s16 +; CHECK-LABEL: test_vld4_s16 ; CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}] %1 = bitcast i16* %a to 
i8* %vld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8* %1, i32 2) @@ -582,7 +582,7 @@ define %struct.int16x4x4_t @test_vld4_s16(i16* readonly %a) { } define %struct.int32x2x4_t @test_vld4_s32(i32* readonly %a) { -; CHECK: test_vld4_s32 +; CHECK-LABEL: test_vld4_s32 ; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}] %1 = bitcast i32* %a to i8* %vld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32(i8* %1, i32 4) @@ -598,7 +598,7 @@ define %struct.int32x2x4_t @test_vld4_s32(i32* readonly %a) { } define %struct.int64x1x4_t @test_vld4_s64(i64* readonly %a) { -; CHECK: test_vld4_s64 +; CHECK-LABEL: test_vld4_s64 ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}] %1 = bitcast i64* %a to i8* %vld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8* %1, i32 8) @@ -614,7 +614,7 @@ define %struct.int64x1x4_t @test_vld4_s64(i64* readonly %a) { } define %struct.float32x2x4_t @test_vld4_f32(float* readonly %a) { -; CHECK: test_vld4_f32 +; CHECK-LABEL: test_vld4_f32 ; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}] %1 = bitcast float* %a to i8* %vld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4.v2f32(i8* %1, i32 4) @@ -630,7 +630,7 @@ define %struct.float32x2x4_t @test_vld4_f32(float* readonly %a) { } define %struct.float64x1x4_t @test_vld4_f64(double* readonly %a) { -; CHECK: test_vld4_f64 +; CHECK-LABEL: test_vld4_f64 ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}] %1 = bitcast double* %a to i8* %vld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8* %1, i32 8) @@ -695,14 +695,14 @@ declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vl declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8*, i32) define void @test_vst1q_s8(i8* %a, <16 x i8> %b) { -; CHECK: test_vst1q_s8 +; CHECK-LABEL: test_vst1q_s8 ; CHECK: st1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] tail call void @llvm.arm.neon.vst1.v16i8(i8* %a, <16 x i8> %b, i32 1) ret void } define void @test_vst1q_s16(i16* %a, <8 x i16> %b) { -; CHECK: test_vst1q_s16 +; CHECK-LABEL: test_vst1q_s16 ; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] %1 = bitcast i16* %a to i8* tail call void @llvm.arm.neon.vst1.v8i16(i8* %1, <8 x i16> %b, i32 2) @@ -710,7 +710,7 @@ define void @test_vst1q_s16(i16* %a, <8 x i16> %b) { } define void @test_vst1q_s32(i32* %a, <4 x i32> %b) { -; CHECK: test_vst1q_s32 +; CHECK-LABEL: test_vst1q_s32 ; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] %1 = bitcast i32* %a to i8* tail call void @llvm.arm.neon.vst1.v4i32(i8* %1, <4 x i32> %b, i32 4) @@ -718,7 +718,7 @@ define void @test_vst1q_s32(i32* %a, <4 x i32> %b) { } define void @test_vst1q_s64(i64* %a, <2 x i64> %b) { -; CHECK: test_vst1q_s64 +; CHECK-LABEL: test_vst1q_s64 ; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %1 = bitcast i64* %a to i8* tail call void @llvm.arm.neon.vst1.v2i64(i8* %1, <2 x i64> %b, i32 8) @@ -726,7 +726,7 @@ define void @test_vst1q_s64(i64* %a, <2 x i64> %b) { } define void @test_vst1q_f32(float* %a, <4 x float> %b) { -; CHECK: test_vst1q_f32 +; CHECK-LABEL: test_vst1q_f32 ; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] %1 = bitcast float* %a to i8* tail call void @llvm.arm.neon.vst1.v4f32(i8* %1, <4 x float> %b, 
i32 4) @@ -734,7 +734,7 @@ define void @test_vst1q_f32(float* %a, <4 x float> %b) { } define void @test_vst1q_f64(double* %a, <2 x double> %b) { -; CHECK: test_vst1q_f64 +; CHECK-LABEL: test_vst1q_f64 ; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %1 = bitcast double* %a to i8* tail call void @llvm.arm.neon.vst1.v2f64(i8* %1, <2 x double> %b, i32 8) @@ -742,14 +742,14 @@ define void @test_vst1q_f64(double* %a, <2 x double> %b) { } define void @test_vst1_s8(i8* %a, <8 x i8> %b) { -; CHECK: test_vst1_s8 +; CHECK-LABEL: test_vst1_s8 ; CHECK: st1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] tail call void @llvm.arm.neon.vst1.v8i8(i8* %a, <8 x i8> %b, i32 1) ret void } define void @test_vst1_s16(i16* %a, <4 x i16> %b) { -; CHECK: test_vst1_s16 +; CHECK-LABEL: test_vst1_s16 ; CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] %1 = bitcast i16* %a to i8* tail call void @llvm.arm.neon.vst1.v4i16(i8* %1, <4 x i16> %b, i32 2) @@ -757,7 +757,7 @@ define void @test_vst1_s16(i16* %a, <4 x i16> %b) { } define void @test_vst1_s32(i32* %a, <2 x i32> %b) { -; CHECK: test_vst1_s32 +; CHECK-LABEL: test_vst1_s32 ; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] %1 = bitcast i32* %a to i8* tail call void @llvm.arm.neon.vst1.v2i32(i8* %1, <2 x i32> %b, i32 4) @@ -765,7 +765,7 @@ define void @test_vst1_s32(i32* %a, <2 x i32> %b) { } define void @test_vst1_s64(i64* %a, <1 x i64> %b) { -; CHECK: test_vst1_s64 +; CHECK-LABEL: test_vst1_s64 ; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] %1 = bitcast i64* %a to i8* tail call void @llvm.arm.neon.vst1.v1i64(i8* %1, <1 x i64> %b, i32 8) @@ -773,7 +773,7 @@ define void @test_vst1_s64(i64* %a, <1 x i64> %b) { } define void @test_vst1_f32(float* %a, <2 x float> %b) { -; CHECK: test_vst1_f32 +; CHECK-LABEL: test_vst1_f32 ; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] %1 = bitcast float* %a to i8* tail call void @llvm.arm.neon.vst1.v2f32(i8* %1, <2 x float> %b, i32 4) @@ -781,7 +781,7 @@ define void @test_vst1_f32(float* %a, <2 x float> %b) { } define void @test_vst1_f64(double* %a, <1 x double> %b) { -; CHECK: test_vst1_f64 +; CHECK-LABEL: test_vst1_f64 ; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] %1 = bitcast double* %a to i8* tail call void @llvm.arm.neon.vst1.v1f64(i8* %1, <1 x double> %b, i32 8) @@ -789,7 +789,7 @@ define void @test_vst1_f64(double* %a, <1 x double> %b) { } define void @test_vst2q_s8(i8* %a, [2 x <16 x i8>] %b.coerce) { -; CHECK: test_vst2q_s8 +; CHECK-LABEL: test_vst2q_s8 ; CHECK: st2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %b.coerce, 1 @@ -798,7 +798,7 @@ define void @test_vst2q_s8(i8* %a, [2 x <16 x i8>] %b.coerce) { } define void @test_vst2q_s16(i16* %a, [2 x <8 x i16>] %b.coerce) { -; CHECK: test_vst2q_s16 +; CHECK-LABEL: test_vst2q_s16 ; CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1 @@ -808,7 +808,7 @@ define void @test_vst2q_s16(i16* %a, [2 x <8 x i16>] %b.coerce) { } define void @test_vst2q_s32(i32* %a, [2 x <4 x i32>] %b.coerce) { -; CHECK: test_vst2q_s32 +; CHECK-LABEL: test_vst2q_s32 ; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1 @@ -818,7 +818,7 @@ define void @test_vst2q_s32(i32* %a, [2 x <4 x i32>] %b.coerce) { } 
define void @test_vst2q_s64(i64* %a, [2 x <2 x i64>] %b.coerce) { -; CHECK: test_vst2q_s64 +; CHECK-LABEL: test_vst2q_s64 ; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1 @@ -828,7 +828,7 @@ define void @test_vst2q_s64(i64* %a, [2 x <2 x i64>] %b.coerce) { } define void @test_vst2q_f32(float* %a, [2 x <4 x float>] %b.coerce) { -; CHECK: test_vst2q_f32 +; CHECK-LABEL: test_vst2q_f32 ; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1 @@ -838,7 +838,7 @@ define void @test_vst2q_f32(float* %a, [2 x <4 x float>] %b.coerce) { } define void @test_vst2q_f64(double* %a, [2 x <2 x double>] %b.coerce) { -; CHECK: test_vst2q_f64 +; CHECK-LABEL: test_vst2q_f64 ; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1 @@ -848,7 +848,7 @@ define void @test_vst2q_f64(double* %a, [2 x <2 x double>] %b.coerce) { } define void @test_vst2_s8(i8* %a, [2 x <8 x i8>] %b.coerce) { -; CHECK: test_vst2_s8 +; CHECK-LABEL: test_vst2_s8 ; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1 @@ -857,7 +857,7 @@ define void @test_vst2_s8(i8* %a, [2 x <8 x i8>] %b.coerce) { } define void @test_vst2_s16(i16* %a, [2 x <4 x i16>] %b.coerce) { -; CHECK: test_vst2_s16 +; CHECK-LABEL: test_vst2_s16 ; CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1 @@ -867,7 +867,7 @@ define void @test_vst2_s16(i16* %a, [2 x <4 x i16>] %b.coerce) { } define void @test_vst2_s32(i32* %a, [2 x <2 x i32>] %b.coerce) { -; CHECK: test_vst2_s32 +; CHECK-LABEL: test_vst2_s32 ; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1 @@ -877,7 +877,7 @@ define void @test_vst2_s32(i32* %a, [2 x <2 x i32>] %b.coerce) { } define void @test_vst2_s64(i64* %a, [2 x <1 x i64>] %b.coerce) { -; CHECK: test_vst2_s64 +; CHECK-LABEL: test_vst2_s64 ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1 @@ -887,7 +887,7 @@ define void @test_vst2_s64(i64* %a, [2 x <1 x i64>] %b.coerce) { } define void @test_vst2_f32(float* %a, [2 x <2 x float>] %b.coerce) { -; CHECK: test_vst2_f32 +; CHECK-LABEL: test_vst2_f32 ; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1 @@ -897,7 +897,7 @@ define void @test_vst2_f32(float* %a, [2 x <2 x float>] %b.coerce) { } define void @test_vst2_f64(double* %a, [2 x <1 x double>] %b.coerce) { -; CHECK: test_vst2_f64 +; CHECK-LABEL: test_vst2_f64 ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0 
%b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1 @@ -907,7 +907,7 @@ define void @test_vst2_f64(double* %a, [2 x <1 x double>] %b.coerce) { } define void @test_vst3q_s8(i8* %a, [3 x <16 x i8>] %b.coerce) { -; CHECK: test_vst3q_s8 +; CHECK-LABEL: test_vst3q_s8 ; CHECK: st3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %b.coerce, 1 @@ -917,7 +917,7 @@ define void @test_vst3q_s8(i8* %a, [3 x <16 x i8>] %b.coerce) { } define void @test_vst3q_s16(i16* %a, [3 x <8 x i16>] %b.coerce) { -; CHECK: test_vst3q_s16 +; CHECK-LABEL: test_vst3q_s16 ; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1 @@ -928,7 +928,7 @@ define void @test_vst3q_s16(i16* %a, [3 x <8 x i16>] %b.coerce) { } define void @test_vst3q_s32(i32* %a, [3 x <4 x i32>] %b.coerce) { -; CHECK: test_vst3q_s32 +; CHECK-LABEL: test_vst3q_s32 ; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1 @@ -939,7 +939,7 @@ define void @test_vst3q_s32(i32* %a, [3 x <4 x i32>] %b.coerce) { } define void @test_vst3q_s64(i64* %a, [3 x <2 x i64>] %b.coerce) { -; CHECK: test_vst3q_s64 +; CHECK-LABEL: test_vst3q_s64 ; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1 @@ -950,7 +950,7 @@ define void @test_vst3q_s64(i64* %a, [3 x <2 x i64>] %b.coerce) { } define void @test_vst3q_f32(float* %a, [3 x <4 x float>] %b.coerce) { -; CHECK: test_vst3q_f32 +; CHECK-LABEL: test_vst3q_f32 ; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1 @@ -961,7 +961,7 @@ define void @test_vst3q_f32(float* %a, [3 x <4 x float>] %b.coerce) { } define void @test_vst3q_f64(double* %a, [3 x <2 x double>] %b.coerce) { -; CHECK: test_vst3q_f64 +; CHECK-LABEL: test_vst3q_f64 ; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1 @@ -972,7 +972,7 @@ define void @test_vst3q_f64(double* %a, [3 x <2 x double>] %b.coerce) { } define void @test_vst3_s8(i8* %a, [3 x <8 x i8>] %b.coerce) { -; CHECK: test_vst3_s8 +; CHECK-LABEL: test_vst3_s8 ; CHECK: st3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1 @@ -982,7 +982,7 @@ define void @test_vst3_s8(i8* %a, [3 x <8 x i8>] %b.coerce) { } define void @test_vst3_s16(i16* %a, [3 x <4 x i16>] %b.coerce) { -; CHECK: test_vst3_s16 +; CHECK-LABEL: test_vst3_s16 ; CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1 @@ -993,7 +993,7 @@ define void @test_vst3_s16(i16* %a, [3 x 
<4 x i16>] %b.coerce) { } define void @test_vst3_s32(i32* %a, [3 x <2 x i32>] %b.coerce) { -; CHECK: test_vst3_s32 +; CHECK-LABEL: test_vst3_s32 ; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1 @@ -1004,7 +1004,7 @@ define void @test_vst3_s32(i32* %a, [3 x <2 x i32>] %b.coerce) { } define void @test_vst3_s64(i64* %a, [3 x <1 x i64>] %b.coerce) { -; CHECK: test_vst3_s64 +; CHECK-LABEL: test_vst3_s64 ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1 @@ -1015,7 +1015,7 @@ define void @test_vst3_s64(i64* %a, [3 x <1 x i64>] %b.coerce) { } define void @test_vst3_f32(float* %a, [3 x <2 x float>] %b.coerce) { -; CHECK: test_vst3_f32 +; CHECK-LABEL: test_vst3_f32 ; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1 @@ -1026,7 +1026,7 @@ define void @test_vst3_f32(float* %a, [3 x <2 x float>] %b.coerce) { } define void @test_vst3_f64(double* %a, [3 x <1 x double>] %b.coerce) { -; CHECK: test_vst3_f64 +; CHECK-LABEL: test_vst3_f64 ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1 @@ -1037,7 +1037,7 @@ define void @test_vst3_f64(double* %a, [3 x <1 x double>] %b.coerce) { } define void @test_vst4q_s8(i8* %a, [4 x <16 x i8>] %b.coerce) { -; CHECK: test_vst4q_s8 +; CHECK-LABEL: test_vst4q_s8 ; CHECK: st4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1 @@ -1048,7 +1048,7 @@ define void @test_vst4q_s8(i8* %a, [4 x <16 x i8>] %b.coerce) { } define void @test_vst4q_s16(i16* %a, [4 x <8 x i16>] %b.coerce) { -; CHECK: test_vst4q_s16 +; CHECK-LABEL: test_vst4q_s16 ; CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1 @@ -1060,7 +1060,7 @@ define void @test_vst4q_s16(i16* %a, [4 x <8 x i16>] %b.coerce) { } define void @test_vst4q_s32(i32* %a, [4 x <4 x i32>] %b.coerce) { -; CHECK: test_vst4q_s32 +; CHECK-LABEL: test_vst4q_s32 ; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1 @@ -1072,7 +1072,7 @@ define void @test_vst4q_s32(i32* %a, [4 x <4 x i32>] %b.coerce) { } define void @test_vst4q_s64(i64* %a, [4 x <2 x i64>] %b.coerce) { -; CHECK: test_vst4q_s64 +; CHECK-LABEL: test_vst4q_s64 ; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1 @@ -1084,7 +1084,7 @@ define void @test_vst4q_s64(i64* %a, [4 x <2 x i64>] %b.coerce) { } define void 
@test_vst4q_f32(float* %a, [4 x <4 x float>] %b.coerce) { -; CHECK: test_vst4q_f32 +; CHECK-LABEL: test_vst4q_f32 ; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1 @@ -1096,7 +1096,7 @@ define void @test_vst4q_f32(float* %a, [4 x <4 x float>] %b.coerce) { } define void @test_vst4q_f64(double* %a, [4 x <2 x double>] %b.coerce) { -; CHECK: test_vst4q_f64 +; CHECK-LABEL: test_vst4q_f64 ; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1 @@ -1108,7 +1108,7 @@ define void @test_vst4q_f64(double* %a, [4 x <2 x double>] %b.coerce) { } define void @test_vst4_s8(i8* %a, [4 x <8 x i8>] %b.coerce) { -; CHECK: test_vst4_s8 +; CHECK-LABEL: test_vst4_s8 ; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1 @@ -1119,7 +1119,7 @@ define void @test_vst4_s8(i8* %a, [4 x <8 x i8>] %b.coerce) { } define void @test_vst4_s16(i16* %a, [4 x <4 x i16>] %b.coerce) { -; CHECK: test_vst4_s16 +; CHECK-LABEL: test_vst4_s16 ; CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1 @@ -1131,7 +1131,7 @@ define void @test_vst4_s16(i16* %a, [4 x <4 x i16>] %b.coerce) { } define void @test_vst4_s32(i32* %a, [4 x <2 x i32>] %b.coerce) { -; CHECK: test_vst4_s32 +; CHECK-LABEL: test_vst4_s32 ; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1 @@ -1143,7 +1143,7 @@ define void @test_vst4_s32(i32* %a, [4 x <2 x i32>] %b.coerce) { } define void @test_vst4_s64(i64* %a, [4 x <1 x i64>] %b.coerce) { -; CHECK: test_vst4_s64 +; CHECK-LABEL: test_vst4_s64 ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1 @@ -1155,7 +1155,7 @@ define void @test_vst4_s64(i64* %a, [4 x <1 x i64>] %b.coerce) { } define void @test_vst4_f32(float* %a, [4 x <2 x float>] %b.coerce) { -; CHECK: test_vst4_f32 +; CHECK-LABEL: test_vst4_f32 ; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1 @@ -1167,7 +1167,7 @@ define void @test_vst4_f32(float* %a, [4 x <2 x float>] %b.coerce) { } define void @test_vst4_f64(double* %a, [4 x <1 x double>] %b.coerce) { -; CHECK: test_vst4_f64 +; CHECK-LABEL: test_vst4_f64 ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0 %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1 @@ -1225,4 +1225,1018 @@ declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 
x i16>, <4 x i16>, <4 declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32) declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) -declare void @llvm.arm.neon.vst4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
\ No newline at end of file +declare void @llvm.arm.neon.vst4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32) + +define %struct.int8x16x2_t @test_vld1q_s8_x2(i8* %a) { +; CHECK-LABEL: test_vld1q_s8_x2 +; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] + %1 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1) + %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0 + %3 = extractvalue { <16 x i8>, <16 x i8> } %1, 1 + %4 = insertvalue %struct.int8x16x2_t undef, <16 x i8> %2, 0, 0 + %5 = insertvalue %struct.int8x16x2_t %4, <16 x i8> %3, 0, 1 + ret %struct.int8x16x2_t %5 +} + +define %struct.int16x8x2_t @test_vld1q_s16_x2(i16* %a) { +; CHECK-LABEL: test_vld1q_s16_x2 +; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] + %1 = bitcast i16* %a to i8* + %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2) + %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0 + %4 = extractvalue { <8 x i16>, <8 x i16> } %2, 1 + %5 = insertvalue %struct.int16x8x2_t undef, <8 x i16> %3, 0, 0 + %6 = insertvalue %struct.int16x8x2_t %5, <8 x i16> %4, 0, 1 + ret %struct.int16x8x2_t %6 +} + +define %struct.int32x4x2_t @test_vld1q_s32_x2(i32* %a) { +; CHECK-LABEL: test_vld1q_s32_x2 +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] + %1 = bitcast i32* %a to i8* + %2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8* %1, i32 4) + %3 = extractvalue { <4 x i32>, <4 x i32> } %2, 0 + %4 = extractvalue { <4 x i32>, <4 x i32> } %2, 1 + %5 = insertvalue %struct.int32x4x2_t undef, <4 x i32> %3, 0, 0 + %6 = insertvalue %struct.int32x4x2_t %5, <4 x i32> %4, 0, 1 + ret %struct.int32x4x2_t %6 +} + +define %struct.int64x2x2_t @test_vld1q_s64_x2(i64* %a) { +; CHECK-LABEL: test_vld1q_s64_x2 +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] + %1 = bitcast i64* %a to i8* + %2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x2.v2i64(i8* %1, i32 8) + %3 = extractvalue { <2 x i64>, <2 x i64> } %2, 0 + %4 = extractvalue { <2 x i64>, <2 x i64> } %2, 1 + %5 = insertvalue %struct.int64x2x2_t undef, <2 x i64> %3, 0, 0 + %6 = insertvalue %struct.int64x2x2_t %5, <2 x i64> %4, 0, 1 + ret %struct.int64x2x2_t %6 +} + +define %struct.float32x4x2_t @test_vld1q_f32_x2(float* %a) { +; CHECK-LABEL: test_vld1q_f32_x2 +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] + %1 = bitcast float* %a to i8* + %2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x2.v4f32(i8* %1, i32 4) + %3 = extractvalue { <4 x float>, <4 x float> } %2, 0 + %4 = extractvalue { <4 x float>, <4 x float> } %2, 1 + %5 = insertvalue %struct.float32x4x2_t undef, <4 x float> %3, 0, 0 + %6 = insertvalue %struct.float32x4x2_t %5, <4 x float> %4, 0, 1 + ret %struct.float32x4x2_t %6 +} + + +define %struct.float64x2x2_t @test_vld1q_f64_x2(double* %a) { +; CHECK-LABEL: test_vld1q_f64_x2 +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] + %1 = bitcast double* %a to i8* + %2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x2.v2f64(i8* %1, i32 8) + %3 = extractvalue { <2 x double>, <2 x double> } %2, 0 + %4 = extractvalue { <2 x double>, <2 x double> } %2, 1 + %5 = insertvalue %struct.float64x2x2_t undef, <2 x double> %3, 0, 0 + %6 = insertvalue %struct.float64x2x2_t %5, <2 x double> %4, 0, 1 + ret %struct.float64x2x2_t %6 +} + +define %struct.int8x8x2_t @test_vld1_s8_x2(i8* %a) { +; CHECK-LABEL: test_vld1_s8_x2 +; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, 
[{{x[0-9]+|sp}}] + %1 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8* %a, i32 1) + %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0 + %3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1 + %4 = insertvalue %struct.int8x8x2_t undef, <8 x i8> %2, 0, 0 + %5 = insertvalue %struct.int8x8x2_t %4, <8 x i8> %3, 0, 1 + ret %struct.int8x8x2_t %5 +} + +define %struct.int16x4x2_t @test_vld1_s16_x2(i16* %a) { +; CHECK-LABEL: test_vld1_s16_x2 +; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] + %1 = bitcast i16* %a to i8* + %2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x2.v4i16(i8* %1, i32 2) + %3 = extractvalue { <4 x i16>, <4 x i16> } %2, 0 + %4 = extractvalue { <4 x i16>, <4 x i16> } %2, 1 + %5 = insertvalue %struct.int16x4x2_t undef, <4 x i16> %3, 0, 0 + %6 = insertvalue %struct.int16x4x2_t %5, <4 x i16> %4, 0, 1 + ret %struct.int16x4x2_t %6 +} + +define %struct.int32x2x2_t @test_vld1_s32_x2(i32* %a) { +; CHECK-LABEL: test_vld1_s32_x2 +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] + %1 = bitcast i32* %a to i8* + %2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x2.v2i32(i8* %1, i32 4) + %3 = extractvalue { <2 x i32>, <2 x i32> } %2, 0 + %4 = extractvalue { <2 x i32>, <2 x i32> } %2, 1 + %5 = insertvalue %struct.int32x2x2_t undef, <2 x i32> %3, 0, 0 + %6 = insertvalue %struct.int32x2x2_t %5, <2 x i32> %4, 0, 1 + ret %struct.int32x2x2_t %6 +} + +define %struct.int64x1x2_t @test_vld1_s64_x2(i64* %a) { +; CHECK-LABEL: test_vld1_s64_x2 +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] + %1 = bitcast i64* %a to i8* + %2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x2.v1i64(i8* %1, i32 8) + %3 = extractvalue { <1 x i64>, <1 x i64> } %2, 0 + %4 = extractvalue { <1 x i64>, <1 x i64> } %2, 1 + %5 = insertvalue %struct.int64x1x2_t undef, <1 x i64> %3, 0, 0 + %6 = insertvalue %struct.int64x1x2_t %5, <1 x i64> %4, 0, 1 + ret %struct.int64x1x2_t %6 +} + +define %struct.float32x2x2_t @test_vld1_f32_x2(float* %a) { +; CHECK-LABEL: test_vld1_f32_x2 +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] + %1 = bitcast float* %a to i8* + %2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x2.v2f32(i8* %1, i32 4) + %3 = extractvalue { <2 x float>, <2 x float> } %2, 0 + %4 = extractvalue { <2 x float>, <2 x float> } %2, 1 + %5 = insertvalue %struct.float32x2x2_t undef, <2 x float> %3, 0, 0 + %6 = insertvalue %struct.float32x2x2_t %5, <2 x float> %4, 0, 1 + ret %struct.float32x2x2_t %6 +} + +define %struct.float64x1x2_t @test_vld1_f64_x2(double* %a) { +; CHECK-LABEL: test_vld1_f64_x2 +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] + %1 = bitcast double* %a to i8* + %2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x2.v1f64(i8* %1, i32 8) + %3 = extractvalue { <1 x double>, <1 x double> } %2, 0 + %4 = extractvalue { <1 x double>, <1 x double> } %2, 1 + %5 = insertvalue %struct.float64x1x2_t undef, <1 x double> %3, 0, 0 + %6 = insertvalue %struct.float64x1x2_t %5, <1 x double> %4, 0, 1 + ret %struct.float64x1x2_t %6 +} + +define %struct.int8x16x3_t @test_vld1q_s8_x3(i8* %a) { +; CHECK-LABEL: test_vld1q_s8_x3 +; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, +; [{{x[0-9]+|sp}}] + %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x3.v16i8(i8* %a, i32 1) + %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 0 + %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 1 + %4 = extractvalue { <16 x i8>, <16 x i8>, 
<16 x i8> } %1, 2 + %5 = insertvalue %struct.int8x16x3_t undef, <16 x i8> %2, 0, 0 + %6 = insertvalue %struct.int8x16x3_t %5, <16 x i8> %3, 0, 1 + %7 = insertvalue %struct.int8x16x3_t %6, <16 x i8> %4, 0, 2 + ret %struct.int8x16x3_t %7 +} + +define %struct.int16x8x3_t @test_vld1q_s16_x3(i16* %a) { +; CHECK-LABEL: test_vld1q_s16_x3 +; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, +; [{{x[0-9]+|sp}}] + %1 = bitcast i16* %a to i8* + %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2) + %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0 + %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 1 + %5 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 2 + %6 = insertvalue %struct.int16x8x3_t undef, <8 x i16> %3, 0, 0 + %7 = insertvalue %struct.int16x8x3_t %6, <8 x i16> %4, 0, 1 + %8 = insertvalue %struct.int16x8x3_t %7, <8 x i16> %5, 0, 2 + ret %struct.int16x8x3_t %8 +} + +define %struct.int32x4x3_t @test_vld1q_s32_x3(i32* %a) { +; CHECK-LABEL: test_vld1q_s32_x3 +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, +; [{{x[0-9]+|sp}}] + %1 = bitcast i32* %a to i8* + %2 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x3.v4i32(i8* %1, i32 4) + %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 0 + %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 1 + %5 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 2 + %6 = insertvalue %struct.int32x4x3_t undef, <4 x i32> %3, 0, 0 + %7 = insertvalue %struct.int32x4x3_t %6, <4 x i32> %4, 0, 1 + %8 = insertvalue %struct.int32x4x3_t %7, <4 x i32> %5, 0, 2 + ret %struct.int32x4x3_t %8 +} + +define %struct.int64x2x3_t @test_vld1q_s64_x3(i64* %a) { +; CHECK-LABEL: test_vld1q_s64_x3 +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, +; [{{x[0-9]+|sp}}] + %1 = bitcast i64* %a to i8* + %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8) + %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0 + %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 1 + %5 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 2 + %6 = insertvalue %struct.int64x2x3_t undef, <2 x i64> %3, 0, 0 + %7 = insertvalue %struct.int64x2x3_t %6, <2 x i64> %4, 0, 1 + %8 = insertvalue %struct.int64x2x3_t %7, <2 x i64> %5, 0, 2 + ret %struct.int64x2x3_t %8 +} + +define %struct.float32x4x3_t @test_vld1q_f32_x3(float* %a) { +; CHECK-LABEL: test_vld1q_f32_x3 +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, +; [{{x[0-9]+|sp}}] + %1 = bitcast float* %a to i8* + %2 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x3.v4f32(i8* %1, i32 4) + %3 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 0 + %4 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 1 + %5 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 2 + %6 = insertvalue %struct.float32x4x3_t undef, <4 x float> %3, 0, 0 + %7 = insertvalue %struct.float32x4x3_t %6, <4 x float> %4, 0, 1 + %8 = insertvalue %struct.float32x4x3_t %7, <4 x float> %5, 0, 2 + ret %struct.float32x4x3_t %8 +} + + +define %struct.float64x2x3_t @test_vld1q_f64_x3(double* %a) { +; CHECK-LABEL: test_vld1q_f64_x3 +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, +; [{{x[0-9]+|sp}}] + %1 = bitcast double* %a to i8* + %2 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x3.v2f64(i8* %1, i32 8) + %3 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 0 + %4 = 
extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 1 + %5 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 2 + %6 = insertvalue %struct.float64x2x3_t undef, <2 x double> %3, 0, 0 + %7 = insertvalue %struct.float64x2x3_t %6, <2 x double> %4, 0, 1 + %8 = insertvalue %struct.float64x2x3_t %7, <2 x double> %5, 0, 2 + ret %struct.float64x2x3_t %8 +} + +define %struct.int8x8x3_t @test_vld1_s8_x3(i8* %a) { +; CHECK-LABEL: test_vld1_s8_x3 +; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, +; [{{x[0-9]+|sp}}] + %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x3.v8i8(i8* %a, i32 1) + %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0 + %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1 + %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2 + %5 = insertvalue %struct.int8x8x3_t undef, <8 x i8> %2, 0, 0 + %6 = insertvalue %struct.int8x8x3_t %5, <8 x i8> %3, 0, 1 + %7 = insertvalue %struct.int8x8x3_t %6, <8 x i8> %4, 0, 2 + ret %struct.int8x8x3_t %7 +} + +define %struct.int16x4x3_t @test_vld1_s16_x3(i16* %a) { +; CHECK-LABEL: test_vld1_s16_x3 +; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, +; [{{x[0-9]+|sp}}] + %1 = bitcast i16* %a to i8* + %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x3.v4i16(i8* %1, i32 2) + %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 0 + %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 1 + %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 2 + %6 = insertvalue %struct.int16x4x3_t undef, <4 x i16> %3, 0, 0 + %7 = insertvalue %struct.int16x4x3_t %6, <4 x i16> %4, 0, 1 + %8 = insertvalue %struct.int16x4x3_t %7, <4 x i16> %5, 0, 2 + ret %struct.int16x4x3_t %8 +} + +define %struct.int32x2x3_t @test_vld1_s32_x3(i32* %a) { + %1 = bitcast i32* %a to i8* +; CHECK-LABEL: test_vld1_s32_x3 +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, +; [{{x[0-9]+|sp}}] + %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x3.v2i32(i8* %1, i32 4) + %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 0 + %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 1 + %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 2 + %6 = insertvalue %struct.int32x2x3_t undef, <2 x i32> %3, 0, 0 + %7 = insertvalue %struct.int32x2x3_t %6, <2 x i32> %4, 0, 1 + %8 = insertvalue %struct.int32x2x3_t %7, <2 x i32> %5, 0, 2 + ret %struct.int32x2x3_t %8 +} + +define %struct.int64x1x3_t @test_vld1_s64_x3(i64* %a) { +; CHECK-LABEL: test_vld1_s64_x3 +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, +; [{{x[0-9]+|sp}}] + %1 = bitcast i64* %a to i8* + %2 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x3.v1i64(i8* %1, i32 8) + %3 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 0 + %4 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 1 + %5 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 2 + %6 = insertvalue %struct.int64x1x3_t undef, <1 x i64> %3, 0, 0 + %7 = insertvalue %struct.int64x1x3_t %6, <1 x i64> %4, 0, 1 + %8 = insertvalue %struct.int64x1x3_t %7, <1 x i64> %5, 0, 2 + ret %struct.int64x1x3_t %8 +} + +define %struct.float32x2x3_t @test_vld1_f32_x3(float* %a) { +; CHECK-LABEL: test_vld1_f32_x3 +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, +; [{{x[0-9]+|sp}}] + %1 = bitcast float* %a to i8* + %2 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x3.v2f32(i8* %1, i32 4) + %3 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 
0 + %4 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 1 + %5 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 2 + %6 = insertvalue %struct.float32x2x3_t undef, <2 x float> %3, 0, 0 + %7 = insertvalue %struct.float32x2x3_t %6, <2 x float> %4, 0, 1 + %8 = insertvalue %struct.float32x2x3_t %7, <2 x float> %5, 0, 2 + ret %struct.float32x2x3_t %8 +} + + +define %struct.float64x1x3_t @test_vld1_f64_x3(double* %a) { +; CHECK-LABEL: test_vld1_f64_x3 +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, +; [{{x[0-9]+|sp}}] + %1 = bitcast double* %a to i8* + %2 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x3.v1f64(i8* %1, i32 8) + %3 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 0 + %4 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 1 + %5 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 2 + %6 = insertvalue %struct.float64x1x3_t undef, <1 x double> %3, 0, 0 + %7 = insertvalue %struct.float64x1x3_t %6, <1 x double> %4, 0, 1 + %8 = insertvalue %struct.float64x1x3_t %7, <1 x double> %5, 0, 2 + ret %struct.float64x1x3_t %8 +} + +define %struct.int8x16x4_t @test_vld1q_s8_x4(i8* %a) { +; CHECK-LABEL: test_vld1q_s8_x4 +; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, +; v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] + %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x4.v16i8(i8* %a, i32 1) + %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 0 + %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 1 + %4 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 2 + %5 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 3 + %6 = insertvalue %struct.int8x16x4_t undef, <16 x i8> %2, 0, 0 + %7 = insertvalue %struct.int8x16x4_t %6, <16 x i8> %3, 0, 1 + %8 = insertvalue %struct.int8x16x4_t %7, <16 x i8> %4, 0, 2 + %9 = insertvalue %struct.int8x16x4_t %8, <16 x i8> %5, 0, 3 + ret %struct.int8x16x4_t %9 +} + +define %struct.int16x8x4_t @test_vld1q_s16_x4(i16* %a) { +; CHECK-LABEL: test_vld1q_s16_x4 +; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, +; v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] + %1 = bitcast i16* %a to i8* + %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x4.v8i16(i8* %1, i32 2) + %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 0 + %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 1 + %5 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 2 + %6 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 3 + %7 = insertvalue %struct.int16x8x4_t undef, <8 x i16> %3, 0, 0 + %8 = insertvalue %struct.int16x8x4_t %7, <8 x i16> %4, 0, 1 + %9 = insertvalue %struct.int16x8x4_t %8, <8 x i16> %5, 0, 2 + %10 = insertvalue %struct.int16x8x4_t %9, <8 x i16> %6, 0, 3 + ret %struct.int16x8x4_t %10 +} + +define %struct.int32x4x4_t @test_vld1q_s32_x4(i32* %a) { +; CHECK-LABEL: test_vld1q_s32_x4 +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, +; v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] + %1 = bitcast i32* %a to i8* + %2 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x4.v4i32(i8* %1, i32 4) + %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 0 + %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 1 + %5 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 2 + %6 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 
+  %7 = insertvalue %struct.int32x4x4_t undef, <4 x i32> %3, 0, 0
+  %8 = insertvalue %struct.int32x4x4_t %7, <4 x i32> %4, 0, 1
+  %9 = insertvalue %struct.int32x4x4_t %8, <4 x i32> %5, 0, 2
+  %10 = insertvalue %struct.int32x4x4_t %9, <4 x i32> %6, 0, 3
+  ret %struct.int32x4x4_t %10
+}
+
+define %struct.int64x2x4_t @test_vld1q_s64_x4(i64* %a) {
+; CHECK-LABEL: test_vld1q_s64_x4
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x4.v2i64(i8* %1, i32 8)
+  %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
+  %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 1
+  %5 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 2
+  %6 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 3
+  %7 = insertvalue %struct.int64x2x4_t undef, <2 x i64> %3, 0, 0
+  %8 = insertvalue %struct.int64x2x4_t %7, <2 x i64> %4, 0, 1
+  %9 = insertvalue %struct.int64x2x4_t %8, <2 x i64> %5, 0, 2
+  %10 = insertvalue %struct.int64x2x4_t %9, <2 x i64> %6, 0, 3
+  ret %struct.int64x2x4_t %10
+}
+
+define %struct.float32x4x4_t @test_vld1q_f32_x4(float* %a) {
+; CHECK-LABEL: test_vld1q_f32_x4
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
+  %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
+  %4 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 1
+  %5 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 2
+  %6 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 3
+  %7 = insertvalue %struct.float32x4x4_t undef, <4 x float> %3, 0, 0
+  %8 = insertvalue %struct.float32x4x4_t %7, <4 x float> %4, 0, 1
+  %9 = insertvalue %struct.float32x4x4_t %8, <4 x float> %5, 0, 2
+  %10 = insertvalue %struct.float32x4x4_t %9, <4 x float> %6, 0, 3
+  ret %struct.float32x4x4_t %10
+}
+
+define %struct.float64x2x4_t @test_vld1q_f64_x4(double* %a) {
+; CHECK-LABEL: test_vld1q_f64_x4
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %2 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x4.v2f64(i8* %1, i32 8)
+  %3 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 0
+  %4 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 1
+  %5 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 2
+  %6 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 3
+  %7 = insertvalue %struct.float64x2x4_t undef, <2 x double> %3, 0, 0
+  %8 = insertvalue %struct.float64x2x4_t %7, <2 x double> %4, 0, 1
+  %9 = insertvalue %struct.float64x2x4_t %8, <2 x double> %5, 0, 2
+  %10 = insertvalue %struct.float64x2x4_t %9, <2 x double> %6, 0, 3
+  ret %struct.float64x2x4_t %10
+}
+
+define %struct.int8x8x4_t @test_vld1_s8_x4(i8* %a) {
+; CHECK-LABEL: test_vld1_s8_x4
+; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+  %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
+  %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
+  %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
+  %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
+  %5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
+  %6 = insertvalue %struct.int8x8x4_t undef, <8 x i8> %2, 0, 0
+  %7 = insertvalue %struct.int8x8x4_t %6, <8 x i8> %3, 0, 1
+  %8 = insertvalue %struct.int8x8x4_t %7, <8 x i8> %4, 0, 2
+  %9 = insertvalue %struct.int8x8x4_t %8, <8 x i8> %5, 0, 3
+  ret %struct.int8x8x4_t %9
+}
+
+define %struct.int16x4x4_t @test_vld1_s16_x4(i16* %a) {
+; CHECK-LABEL: test_vld1_s16_x4
+; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i16* %a to i8*
+  %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x4.v4i16(i8* %1, i32 2)
+  %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
+  %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
+  %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
+  %6 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 3
+  %7 = insertvalue %struct.int16x4x4_t undef, <4 x i16> %3, 0, 0
+  %8 = insertvalue %struct.int16x4x4_t %7, <4 x i16> %4, 0, 1
+  %9 = insertvalue %struct.int16x4x4_t %8, <4 x i16> %5, 0, 2
+  %10 = insertvalue %struct.int16x4x4_t %9, <4 x i16> %6, 0, 3
+  ret %struct.int16x4x4_t %10
+}
+
+define %struct.int32x2x4_t @test_vld1_s32_x4(i32* %a) {
+; CHECK-LABEL: test_vld1_s32_x4
+; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i32* %a to i8*
+  %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x4.v2i32(i8* %1, i32 4)
+  %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
+  %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
+  %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
+  %6 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 3
+  %7 = insertvalue %struct.int32x2x4_t undef, <2 x i32> %3, 0, 0
+  %8 = insertvalue %struct.int32x2x4_t %7, <2 x i32> %4, 0, 1
+  %9 = insertvalue %struct.int32x2x4_t %8, <2 x i32> %5, 0, 2
+  %10 = insertvalue %struct.int32x2x4_t %9, <2 x i32> %6, 0, 3
+  ret %struct.int32x2x4_t %10
+}
+
+define %struct.int64x1x4_t @test_vld1_s64_x4(i64* %a) {
+; CHECK-LABEL: test_vld1_s64_x4
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast i64* %a to i8*
+  %2 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x4.v1i64(i8* %1, i32 8)
+  %3 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 0
+  %4 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 1
+  %5 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 2
+  %6 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 3
+  %7 = insertvalue %struct.int64x1x4_t undef, <1 x i64> %3, 0, 0
+  %8 = insertvalue %struct.int64x1x4_t %7, <1 x i64> %4, 0, 1
+  %9 = insertvalue %struct.int64x1x4_t %8, <1 x i64> %5, 0, 2
+  %10 = insertvalue %struct.int64x1x4_t %9, <1 x i64> %6, 0, 3
+  ret %struct.int64x1x4_t %10
+}
+
+define %struct.float32x2x4_t @test_vld1_f32_x4(float* %a) {
+; CHECK-LABEL: test_vld1_f32_x4
+; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = bitcast float* %a to i8*
+  %2 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x4.v2f32(i8* %1, i32 4)
+  %3 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 0
+  %4 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 1
+  %5 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 2
+  %6 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 3
+  %7 = insertvalue %struct.float32x2x4_t undef, <2 x float> %3, 0, 0
+  %8 = insertvalue %struct.float32x2x4_t %7, <2 x float> %4, 0, 1
+  %9 = insertvalue %struct.float32x2x4_t %8, <2 x float> %5, 0, 2
+  %10 = insertvalue %struct.float32x2x4_t %9, <2 x float> %6, 0, 3
+  ret %struct.float32x2x4_t %10
+}
+
+
+define %struct.float64x1x4_t @test_vld1_f64_x4(double* %a) {
+; CHECK-LABEL: test_vld1_f64_x4
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = bitcast double* %a to i8*
+  %2 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x4.v1f64(i8* %1, i32 8)
+  %3 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 0
+  %4 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 1
+  %5 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 2
+  %6 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 3
+  %7 = insertvalue %struct.float64x1x4_t undef, <1 x double> %3, 0, 0
+  %8 = insertvalue %struct.float64x1x4_t %7, <1 x double> %4, 0, 1
+  %9 = insertvalue %struct.float64x1x4_t %8, <1 x double> %5, 0, 2
+  %10 = insertvalue %struct.float64x1x4_t %9, <1 x double> %6, 0, 3
+  ret %struct.float64x1x4_t %10
+}
+
+define void @test_vst1q_s8_x2(i8* %a, [2 x <16 x i8>] %b) {
+; CHECK-LABEL: test_vst1q_s8_x2
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <16 x i8>] %b, 0
+  %2 = extractvalue [2 x <16 x i8>] %b, 1
+  tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
+  ret void
+}
+
+define void @test_vst1q_s16_x2(i16* %a, [2 x <8 x i16>] %b) {
+; CHECK-LABEL: test_vst1q_s16_x2
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <8 x i16>] %b, 0
+  %2 = extractvalue [2 x <8 x i16>] %b, 1
+  %3 = bitcast i16* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
+  ret void
+}
+
+define void @test_vst1q_s32_x2(i32* %a, [2 x <4 x i32>] %b) {
+; CHECK-LABEL: test_vst1q_s32_x2
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <4 x i32>] %b, 0
+  %2 = extractvalue [2 x <4 x i32>] %b, 1
+  %3 = bitcast i32* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v4i32(i8* %3, <4 x i32> %1, <4 x i32> %2, i32 4)
+  ret void
+}
+
+define void @test_vst1q_s64_x2(i64* %a, [2 x <2 x i64>] %b) {
+; CHECK-LABEL: test_vst1q_s64_x2
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <2 x i64>] %b, 0
+  %2 = extractvalue [2 x <2 x i64>] %b, 1
+  %3 = bitcast i64* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v2i64(i8* %3, <2 x i64> %1, <2 x i64> %2, i32 8)
+  ret void
+}
+
+define void @test_vst1q_f32_x2(float* %a, [2 x <4 x float>] %b) {
+; CHECK-LABEL: test_vst1q_f32_x2
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <4 x float>] %b, 0
+  %2 = extractvalue [2 x <4 x float>] %b, 1
+  %3 = bitcast float* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v4f32(i8* %3, <4 x float> %1, <4 x float> %2, i32 4)
+  ret void
+}
+
+
+define void @test_vst1q_f64_x2(double* %a, [2 x <2 x double>] %b) {
+; CHECK-LABEL: test_vst1q_f64_x2
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <2 x double>] %b, 0
+  %2 = extractvalue [2 x <2 x double>] %b, 1
+  %3 = bitcast double* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v2f64(i8* %3, <2 x double> %1, <2 x double> %2, i32 8)
+  ret void
+}
+
+define void @test_vst1_s8_x2(i8* %a, [2 x <8 x i8>] %b) {
+; CHECK-LABEL: test_vst1_s8_x2
+; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <8 x i8>] %b, 0
+  %2 = extractvalue [2 x <8 x i8>] %b, 1
+  tail call void @llvm.aarch64.neon.vst1x2.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 1)
+  ret void
+}
+
+define void @test_vst1_s16_x2(i16* %a, [2 x <4 x i16>] %b) {
+; CHECK-LABEL: test_vst1_s16_x2
+; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <4 x i16>] %b, 0
+  %2 = extractvalue [2 x <4 x i16>] %b, 1
+  %3 = bitcast i16* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v4i16(i8* %3, <4 x i16> %1, <4 x i16> %2, i32 2)
+  ret void
+}
+
+define void @test_vst1_s32_x2(i32* %a, [2 x <2 x i32>] %b) {
+; CHECK-LABEL: test_vst1_s32_x2
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <2 x i32>] %b, 0
+  %2 = extractvalue [2 x <2 x i32>] %b, 1
+  %3 = bitcast i32* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v2i32(i8* %3, <2 x i32> %1, <2 x i32> %2, i32 4)
+  ret void
+}
+
+define void @test_vst1_s64_x2(i64* %a, [2 x <1 x i64>] %b) {
+; CHECK-LABEL: test_vst1_s64_x2
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <1 x i64>] %b, 0
+  %2 = extractvalue [2 x <1 x i64>] %b, 1
+  %3 = bitcast i64* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v1i64(i8* %3, <1 x i64> %1, <1 x i64> %2, i32 8)
+  ret void
+}
+
+define void @test_vst1_f32_x2(float* %a, [2 x <2 x float>] %b) {
+; CHECK-LABEL: test_vst1_f32_x2
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <2 x float>] %b, 0
+  %2 = extractvalue [2 x <2 x float>] %b, 1
+  %3 = bitcast float* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v2f32(i8* %3, <2 x float> %1, <2 x float> %2, i32 4)
+  ret void
+}
+
+define void @test_vst1_f64_x2(double* %a, [2 x <1 x double>] %b) {
+; CHECK-LABEL: test_vst1_f64_x2
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [2 x <1 x double>] %b, 0
+  %2 = extractvalue [2 x <1 x double>] %b, 1
+  %3 = bitcast double* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v1f64(i8* %3, <1 x double> %1, <1 x double> %2, i32 8)
+  ret void
+}
+
+define void @test_vst1q_s8_x3(i8* %a, [3 x <16 x i8>] %b) {
+; CHECK-LABEL: test_vst1q_s8_x3
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <16 x i8>] %b, 0
+  %2 = extractvalue [3 x <16 x i8>] %b, 1
+  %3 = extractvalue [3 x <16 x i8>] %b, 2
+  tail call void @llvm.aarch64.neon.vst1x3.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, <16 x i8> %3, i32 1)
+  ret void
+}
+
+define void @test_vst1q_s16_x3(i16* %a, [3 x <8 x i16>] %b) {
+; CHECK-LABEL: test_vst1q_s16_x3
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <8 x i16>] %b, 0
+  %2 = extractvalue [3 x <8 x i16>] %b, 1
+  %3 = extractvalue [3 x <8 x i16>] %b, 2
+  %4 = bitcast i16* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v8i16(i8* %4, <8 x i16> %1, <8 x i16> %2, <8 x i16> %3, i32 2)
+  ret void
+}
+
+define void @test_vst1q_s32_x3(i32* %a, [3 x <4 x i32>] %b) {
+; CHECK-LABEL: test_vst1q_s32_x3
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <4 x i32>] %b, 0
+  %2 = extractvalue [3 x <4 x i32>] %b, 1
+  %3 = extractvalue [3 x <4 x i32>] %b, 2
+  %4 = bitcast i32* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v4i32(i8* %4, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, i32 4)
+  ret void
+}
+
+define void @test_vst1q_s64_x3(i64* %a, [3 x <2 x i64>] %b) {
+; CHECK-LABEL: test_vst1q_s64_x3
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <2 x i64>] %b, 0
+  %2 = extractvalue [3 x <2 x i64>] %b, 1
+  %3 = extractvalue [3 x <2 x i64>] %b, 2
+  %4 = bitcast i64* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v2i64(i8* %4, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, i32 8)
+  ret void
+}
+
+define void @test_vst1q_f32_x3(float* %a, [3 x <4 x float>] %b) {
+; CHECK-LABEL: test_vst1q_f32_x3
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <4 x float>] %b, 0
+  %2 = extractvalue [3 x <4 x float>] %b, 1
+  %3 = extractvalue [3 x <4 x float>] %b, 2
+  %4 = bitcast float* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v4f32(i8* %4, <4 x float> %1, <4 x float> %2, <4 x float> %3, i32 4)
+  ret void
+}
+
+define void @test_vst1q_f64_x3(double* %a, [3 x <2 x double>] %b) {
+; CHECK-LABEL: test_vst1q_f64_x3
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <2 x double>] %b, 0
+  %2 = extractvalue [3 x <2 x double>] %b, 1
+  %3 = extractvalue [3 x <2 x double>] %b, 2
+  %4 = bitcast double* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v2f64(i8* %4, <2 x double> %1, <2 x double> %2, <2 x double> %3, i32 8)
+  ret void
+}
+
+define void @test_vst1_s8_x3(i8* %a, [3 x <8 x i8>] %b) {
+; CHECK-LABEL: test_vst1_s8_x3
+; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <8 x i8>] %b, 0
+  %2 = extractvalue [3 x <8 x i8>] %b, 1
+  %3 = extractvalue [3 x <8 x i8>] %b, 2
+  tail call void @llvm.aarch64.neon.vst1x3.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, <8 x i8> %3, i32 1)
+  ret void
+}
+
+define void @test_vst1_s16_x3(i16* %a, [3 x <4 x i16>] %b) {
+; CHECK-LABEL: test_vst1_s16_x3
+; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <4 x i16>] %b, 0
+  %2 = extractvalue [3 x <4 x i16>] %b, 1
+  %3 = extractvalue [3 x <4 x i16>] %b, 2
+  %4 = bitcast i16* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v4i16(i8* %4, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, i32 2)
+  ret void
+}
+
+define void @test_vst1_s32_x3(i32* %a, [3 x <2 x i32>] %b) {
+; CHECK-LABEL: test_vst1_s32_x3
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <2 x i32>] %b, 0
+  %2 = extractvalue [3 x <2 x i32>] %b, 1
+  %3 = extractvalue [3 x <2 x i32>] %b, 2
+  %4 = bitcast i32* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
+  ret void
+}
+
+define void @test_vst1_s64_x3(i64* %a, [3 x <1 x i64>] %b) {
+; CHECK-LABEL: test_vst1_s64_x3
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <1 x i64>] %b, 0
+  %2 = extractvalue [3 x <1 x i64>] %b, 1
+  %3 = extractvalue [3 x <1 x i64>] %b, 2
+  %4 = bitcast i64* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
+  ret void
+}
+
+define void @test_vst1_f32_x3(float* %a, [3 x <2 x float>] %b) {
+; CHECK-LABEL: test_vst1_f32_x3
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <2 x float>] %b, 0
+  %2 = extractvalue [3 x <2 x float>] %b, 1
+  %3 = extractvalue [3 x <2 x float>] %b, 2
+  %4 = bitcast float* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v2f32(i8* %4, <2 x float> %1, <2 x float> %2, <2 x float> %3, i32 4)
+  ret void
+}
+
+define void @test_vst1_f64_x3(double* %a, [3 x <1 x double>] %b) {
+; CHECK-LABEL: test_vst1_f64_x3
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [3 x <1 x double>] %b, 0
+  %2 = extractvalue [3 x <1 x double>] %b, 1
+  %3 = extractvalue [3 x <1 x double>] %b, 2
+  %4 = bitcast double* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v1f64(i8* %4, <1 x double> %1, <1 x double> %2, <1 x double> %3, i32 8)
+  ret void
+}
+
+define void @test_vst1q_s8_x4(i8* %a, [4 x <16 x i8>] %b) {
+; CHECK-LABEL: test_vst1q_s8_x4
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <16 x i8>] %b, 0
+  %2 = extractvalue [4 x <16 x i8>] %b, 1
+  %3 = extractvalue [4 x <16 x i8>] %b, 2
+  %4 = extractvalue [4 x <16 x i8>] %b, 3
+  tail call void @llvm.aarch64.neon.vst1x4.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, <16 x i8> %3, <16 x i8> %4, i32 1)
+  ret void
+}
+
+define void @test_vst1q_s16_x4(i16* %a, [4 x <8 x i16>] %b) {
+; CHECK-LABEL: test_vst1q_s16_x4
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <8 x i16>] %b, 0
+  %2 = extractvalue [4 x <8 x i16>] %b, 1
+  %3 = extractvalue [4 x <8 x i16>] %b, 2
+  %4 = extractvalue [4 x <8 x i16>] %b, 3
+  %5 = bitcast i16* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v8i16(i8* %5, <8 x i16> %1, <8 x i16> %2, <8 x i16> %3, <8 x i16> %4, i32 2)
+  ret void
+}
+
+define void @test_vst1q_s32_x4(i32* %a, [4 x <4 x i32>] %b) {
+; CHECK-LABEL: test_vst1q_s32_x4
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <4 x i32>] %b, 0
+  %2 = extractvalue [4 x <4 x i32>] %b, 1
+  %3 = extractvalue [4 x <4 x i32>] %b, 2
+  %4 = extractvalue [4 x <4 x i32>] %b, 3
+  %5 = bitcast i32* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v4i32(i8* %5, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, i32 4)
+  ret void
+}
+
+define void @test_vst1q_s64_x4(i64* %a, [4 x <2 x i64>] %b) {
+; CHECK-LABEL: test_vst1q_s64_x4
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <2 x i64>] %b, 0
+  %2 = extractvalue [4 x <2 x i64>] %b, 1
+  %3 = extractvalue [4 x <2 x i64>] %b, 2
+  %4 = extractvalue [4 x <2 x i64>] %b, 3
+  %5 = bitcast i64* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v2i64(i8* %5, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, <2 x i64> %4, i32 8)
+  ret void
+}
+
+define void @test_vst1q_f32_x4(float* %a, [4 x <4 x float>] %b) {
+; CHECK-LABEL: test_vst1q_f32_x4
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <4 x float>] %b, 0
+  %2 = extractvalue [4 x <4 x float>] %b, 1
+  %3 = extractvalue [4 x <4 x float>] %b, 2
+  %4 = extractvalue [4 x <4 x float>] %b, 3
+  %5 = bitcast float* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
+  ret void
+}
+
+define void @test_vst1q_f64_x4(double* %a, [4 x <2 x double>] %b) {
+; CHECK-LABEL: test_vst1q_f64_x4
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <2 x double>] %b, 0
+  %2 = extractvalue [4 x <2 x double>] %b, 1
+  %3 = extractvalue [4 x <2 x double>] %b, 2
+  %4 = extractvalue [4 x <2 x double>] %b, 3
+  %5 = bitcast double* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
+  ret void
+}
+
+define void @test_vst1_s8_x4(i8* %a, [4 x <8 x i8>] %b) {
+; CHECK-LABEL: test_vst1_s8_x4
+; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <8 x i8>] %b, 0
+  %2 = extractvalue [4 x <8 x i8>] %b, 1
+  %3 = extractvalue [4 x <8 x i8>] %b, 2
+  %4 = extractvalue [4 x <8 x i8>] %b, 3
+  tail call void @llvm.aarch64.neon.vst1x4.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, <8 x i8> %3, <8 x i8> %4, i32 1)
+  ret void
+}
+
+define void @test_vst1_s16_x4(i16* %a, [4 x <4 x i16>] %b) {
+; CHECK-LABEL: test_vst1_s16_x4
+; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <4 x i16>] %b, 0
+  %2 = extractvalue [4 x <4 x i16>] %b, 1
+  %3 = extractvalue [4 x <4 x i16>] %b, 2
+  %4 = extractvalue [4 x <4 x i16>] %b, 3
+  %5 = bitcast i16* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v4i16(i8* %5, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, <4 x i16> %4, i32 2)
+  ret void
+}
+
+define void @test_vst1_s32_x4(i32* %a, [4 x <2 x i32>] %b) {
+; CHECK-LABEL: test_vst1_s32_x4
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <2 x i32>] %b, 0
+  %2 = extractvalue [4 x <2 x i32>] %b, 1
+  %3 = extractvalue [4 x <2 x i32>] %b, 2
+  %4 = extractvalue [4 x <2 x i32>] %b, 3
+  %5 = bitcast i32* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v2i32(i8* %5, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, <2 x i32> %4, i32 4)
+  ret void
+}
+
+define void @test_vst1_s64_x4(i64* %a, [4 x <1 x i64>] %b) {
+; CHECK-LABEL: test_vst1_s64_x4
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <1 x i64>] %b, 0
+  %2 = extractvalue [4 x <1 x i64>] %b, 1
+  %3 = extractvalue [4 x <1 x i64>] %b, 2
+  %4 = extractvalue [4 x <1 x i64>] %b, 3
+  %5 = bitcast i64* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v1i64(i8* %5, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, <1 x i64> %4, i32 8)
+  ret void
+}
+
+define void @test_vst1_f32_x4(float* %a, [4 x <2 x float>] %b) {
+; CHECK-LABEL: test_vst1_f32_x4
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <2 x float>] %b, 0
+  %2 = extractvalue [4 x <2 x float>] %b, 1
+  %3 = extractvalue [4 x <2 x float>] %b, 2
+  %4 = extractvalue [4 x <2 x float>] %b, 3
+  %5 = bitcast float* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v2f32(i8* %5, <2 x float> %1, <2 x float> %2, <2 x float> %3, <2 x float> %4, i32 4)
+  ret void
+}
+
+define void @test_vst1_f64_x4(double* %a, [4 x <1 x double>] %b) {
+; CHECK-LABEL: test_vst1_f64_x4
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+  %1 = extractvalue [4 x <1 x double>] %b, 0
+  %2 = extractvalue [4 x <1 x double>] %b, 1
+  %3 = extractvalue [4 x <1 x double>] %b, 2
+  %4 = extractvalue [4 x <1 x double>] %b, 3
+  %5 = bitcast double* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v1f64(i8* %5, <1 x double> %1, <1 x double> %2, <1 x double> %3, <1 x double> %4, i32 8)
+  ret void
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x2.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x2.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x2.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x2.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x2.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x2.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x2.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x2.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x3.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x3.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x3.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x3.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x3.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x3.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x3.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x3.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x3.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x3.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x4.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x4.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x4.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x4.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x4.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x4.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x4.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x4.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x4.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x4.v1f64(i8*, i32)
+declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v4i32(i8*, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v2i64(i8*, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v4f32(i8*, <4 x float>, <4 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v2f64(i8*, <2 x double>, <2 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v4i16(i8*, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v2i32(i8*, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v2f32(i8*, <2 x float>, <2 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v1f64(i8*, <1 x double>, <1 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
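For context, the IR above is the pattern a frontend emits for the new ACLE x2/x3/x4 intrinsic families named in the commit message (vld1*_x2/x3/x4 and vst1*_x2/x3/x4): one intrinsic call producing or consuming a struct of 2, 3, or 4 vectors, lowered to a single multi-register ld1/st1. A minimal C sketch of the user-level usage, assuming a toolchain whose arm_neon.h already declares these intrinsics; the function name scale3 and the buffer layout are illustrative only, not part of the commit:

    #include <arm_neon.h>

    /* Illustrative only: load three 2-lane float vectors with one ld1,
       scale each lane, and store all three back with one st1. */
    void scale3(float *buf, float s) {
        float32x2x3_t v = vld1_f32_x3(buf);   /* one ld1 {vA.2s, vB.2s, vC.2s} */
        v.val[0] = vmul_n_f32(v.val[0], s);
        v.val[1] = vmul_n_f32(v.val[1], s);
        v.val[2] = vmul_n_f32(v.val[2], s);
        vst1_f32_x3(buf, v);                  /* one st1 {vA.2s, vB.2s, vC.2s} */
    }

The second test file below covers the same intrinsics when the base pointer is updated after the access, i.e. the post-indexed addressing forms.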
diff --git a/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll b/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
index 5dd0aa88..156fe1d 100644
--- a/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
+++ b/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
 
+;Check for a post-increment updating load.
 define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
 ; CHECK: test_vld1_fx_update
 ; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #8
@@ -11,6 +12,7 @@ define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
   ret <4 x i16> %tmp1
 }
 
+;Check for a post-increment updating load with register increment.
 define <2 x i32> @test_vld1_reg_update(i32** %ptr, i32 %inc) nounwind {
 ; CHECK: test_vld1_reg_update
 ; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
@@ -81,7 +83,6 @@ define <8 x i16> @test_vld4_fx_update(i16** %ptr) nounwind {
   ret <8 x i16> %tmp2
 }
 
-;Check for a post-increment updating load with register increment.
 define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
 ; CHECK: test_vld4_reg_update
 ; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
@@ -93,7 +94,6 @@ define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
   ret <8 x i8> %tmp1
 }
 
-;Check for a post-increment updating store.
 define void @test_vst1_fx_update(float** %ptr, <2 x float> %B) nounwind {
 ; CHECK: test_vst1_fx_update
 ; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #8
@@ -198,3 +198,157 @@ declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32
 declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
 declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
 declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+
+define <16 x i8> @test_vld1x2_fx_update(i8* %a, i8** %ptr) {
+; CHECK: test_vld1x2_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
+  %1 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
+  %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
+  %tmp1 = getelementptr i8* %a, i32 32
+  store i8* %tmp1, i8** %ptr
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vld1x2_reg_update(i16* %a, i16** %ptr, i32 %inc) {
+; CHECK: test_vld1x2_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = bitcast i16* %a to i8*
+  %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
+  %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
+  %tmp1 = getelementptr i16* %a, i32 %inc
+  store i16* %tmp1, i16** %ptr
+  ret <8 x i16> %3
+}
+
+define <2 x i64> @test_vld1x3_fx_update(i64* %a, i64** %ptr) {
+; CHECK: test_vld1x3_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], #48
+  %1 = bitcast i64* %a to i8*
+  %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
+  %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
+  %tmp1 = getelementptr i64* %a, i32 6
+  store i64* %tmp1, i64** %ptr
+  ret <2 x i64> %3
+}
+
+define <8 x i16> @test_vld1x3_reg_update(i16* %a, i16** %ptr, i32 %inc) {
+; CHECK: test_vld1x3_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = bitcast i16* %a to i8*
+  %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
+  %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
+  %tmp1 = getelementptr i16* %a, i32 %inc
+  store i16* %tmp1, i16** %ptr
+  ret <8 x i16> %3
+}
+
+define <4 x float> @test_vld1x4_fx_update(float* %a, float** %ptr) {
+; CHECK: test_vld1x4_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
+  %1 = bitcast float* %a to i8*
+  %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
+  %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
+  %tmp1 = getelementptr float* %a, i32 16
+  store float* %tmp1, float** %ptr
+  ret <4 x float> %3
+}
+
+define <8 x i8> @test_vld1x4_reg_update(i8* readonly %a, i8** %ptr, i32 %inc) #0 {
+; CHECK: test_vld1x4_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
+  %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
+  %tmp1 = getelementptr i8* %a, i32 %inc
+  store i8* %tmp1, i8** %ptr
+  ret <8 x i8> %2
+}
+
+define void @test_vst1x2_fx_update(i8* %a, [2 x <16 x i8>] %b.coerce, i8** %ptr) #2 {
+; CHECK: test_vst1x2_fx_update
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
+  %1 = extractvalue [2 x <16 x i8>] %b.coerce, 0
+  %2 = extractvalue [2 x <16 x i8>] %b.coerce, 1
+  tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
+  %tmp1 = getelementptr i8* %a, i32 32
+  store i8* %tmp1, i8** %ptr
+  ret void
+}
+
+define void @test_vst1x2_reg_update(i16* %a, [2 x <8 x i16>] %b.coerce, i16** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x2_reg_update
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = extractvalue [2 x <8 x i16>] %b.coerce, 0
+  %2 = extractvalue [2 x <8 x i16>] %b.coerce, 1
+  %3 = bitcast i16* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
+  %tmp1 = getelementptr i16* %a, i32 %inc
+  store i16* %tmp1, i16** %ptr
+  ret void
+}
+
+define void @test_vst1x3_fx_update(i32* %a, [3 x <2 x i32>] %b.coerce, i32** %ptr) #2 {
+; CHECK: test_vst1x3_fx_update
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #24
+  %1 = extractvalue [3 x <2 x i32>] %b.coerce, 0
+  %2 = extractvalue [3 x <2 x i32>] %b.coerce, 1
+  %3 = extractvalue [3 x <2 x i32>] %b.coerce, 2
+  %4 = bitcast i32* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
+  %tmp1 = getelementptr i32* %a, i32 6
+  store i32* %tmp1, i32** %ptr
+  ret void
+}
+
+define void @test_vst1x3_reg_update(i64* %a, [3 x <1 x i64>] %b.coerce, i64** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x3_reg_update
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = extractvalue [3 x <1 x i64>] %b.coerce, 0
+  %2 = extractvalue [3 x <1 x i64>] %b.coerce, 1
+  %3 = extractvalue [3 x <1 x i64>] %b.coerce, 2
+  %4 = bitcast i64* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
+  %tmp1 = getelementptr i64* %a, i32 %inc
+  store i64* %tmp1, i64** %ptr
+  ret void
+}
+
+define void @test_vst1x4_fx_update(float* %a, [4 x <4 x float>] %b.coerce, float** %ptr) #2 {
+; CHECK: test_vst1x4_fx_update
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
+  %1 = extractvalue [4 x <4 x float>] %b.coerce, 0
+  %2 = extractvalue [4 x <4 x float>] %b.coerce, 1
+  %3 = extractvalue [4 x <4 x float>] %b.coerce, 2
+  %4 = extractvalue [4 x <4 x float>] %b.coerce, 3
+  %5 = bitcast float* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
+  %tmp1 = getelementptr float* %a, i32 16
+  store float* %tmp1, float** %ptr
+  ret void
+}
+
+define void @test_vst1x4_reg_update(double* %a, [4 x <2 x double>] %b.coerce, double** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x4_reg_update
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+  %1 = extractvalue [4 x <2 x double>] %b.coerce, 0
+  %2 = extractvalue [4 x <2 x double>] %b.coerce, 1
+  %3 = extractvalue [4 x <2 x double>] %b.coerce, 2
+  %4 = extractvalue [4 x <2 x double>] %b.coerce, 3
+  %5 = bitcast double* %a to i8*
+  tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
+  %tmp1 = getelementptr double* %a, i32 %inc
+  store double* %tmp1, double** %ptr
+  ret void
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
+declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) #3
+declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32) #3
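The fx_update/reg_update tests above encode the post-indexed pattern at the IR level: the pointer is advanced by a getelementptr immediately after the intrinsic, and ISel folds the increment into the addressing mode. A hedged C sketch of the fixed-increment case, mirroring test_vld1x2_fx_update; the function name next_pair is illustrative, and the #32 immediate matches two 16-byte vectors:

    #include <arm_neon.h>

    /* Illustrative only: consume a byte stream two q-registers at a time.
       Advancing *p right after the load lets the backend select the
       post-indexed form: ld1 {vN.16b, vM.16b}, [x0], #32. */
    int8x16x2_t next_pair(const int8_t **p) {
        int8x16x2_t v = vld1q_s8_x2(*p);
        *p += 32;  /* 2 vectors x 16 bytes */
        return v;
    }

With a register increment instead of a constant, the same pattern selects the register post-indexed form (ld1 {...}, [x0], x1), as the reg_update tests check.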