Diffstat (limited to 'test/CodeGen/AArch64/arm64-ld1.ll')
-rw-r--r--  test/CodeGen/AArch64/arm64-ld1.ll  360
1 file changed, 180 insertions(+), 180 deletions(-)
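
For context on the diff below: FileCheck only recognizes "; CHECK:" and "; CHECK-NEXT:" (with the trailing colon) as directives, so the old "; CHECK ld2.8b ..." lines were ordinary IR comments that verified nothing. The new -asm-verbose=false flag on the RUN line presumably keeps llc from printing assembler comment lines (basic-block annotations and the like) that could land between the checked instruction and the "ret" and defeat the CHECK-NEXT adjacency, and the load instructions are updated to the explicit-result-type syntax ("load i8, i8* %bar"). A minimal sketch of the corrected pattern, condensed from the first hunk rather than quoted verbatim; the inline explanatory comments are added here and are not part of the test file:

; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs -asm-verbose=false | FileCheck %s

%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }

declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8*) nounwind

define %struct.__neon_int8x8x2_t @ld2_8b(i8* %A) nounwind {
; CHECK-LABEL: ld2_8b
; With the colon the next two lines are real directives; without it FileCheck
; would silently skip them.
; CHECK: ld2.8b { v0, v1 }, [x0]
; CHECK-NEXT: ret
  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
  ret %struct.__neon_int8x8x2_t %tmp2
}
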
diff --git a/test/CodeGen/AArch64/arm64-ld1.ll b/test/CodeGen/AArch64/arm64-ld1.ll
index 72d808c..a83a270 100644
--- a/test/CodeGen/AArch64/arm64-ld1.ll
+++ b/test/CodeGen/AArch64/arm64-ld1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs -asm-verbose=false | FileCheck %s
%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
@@ -8,8 +8,8 @@ define %struct.__neon_int8x8x2_t @ld2_8b(i8* %A) nounwind {
; CHECK-LABEL: ld2_8b
; Make sure we are loading into the results defined by the ABI (i.e., v0, v1)
; and from the argument of the function also defined by ABI (i.e., x0)
-; CHECK ld2.8b { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.8b { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
ret %struct.__neon_int8x8x2_t %tmp2
}
@@ -17,8 +17,8 @@ define %struct.__neon_int8x8x2_t @ld2_8b(i8* %A) nounwind {
define %struct.__neon_int8x8x3_t @ld3_8b(i8* %A) nounwind {
; CHECK-LABEL: ld3_8b
; Make sure we are using the operands defined by the ABI
-; CHECK ld3.8b { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.8b { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
ret %struct.__neon_int8x8x3_t %tmp2
}
@@ -26,8 +26,8 @@ define %struct.__neon_int8x8x3_t @ld3_8b(i8* %A) nounwind {
define %struct.__neon_int8x8x4_t @ld4_8b(i8* %A) nounwind {
; CHECK-LABEL: ld4_8b
; Make sure we are using the operands defined by the ABI
-; CHECK ld4.8b { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.8b { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
ret %struct.__neon_int8x8x4_t %tmp2
}
@@ -43,8 +43,8 @@ declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0i8(i8*) nounwind
define %struct.__neon_int8x16x2_t @ld2_16b(i8* %A) nounwind {
; CHECK-LABEL: ld2_16b
; Make sure we are using the operands defined by the ABI
-; CHECK ld2.16b { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.16b { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
ret %struct.__neon_int8x16x2_t %tmp2
}
@@ -52,8 +52,8 @@ define %struct.__neon_int8x16x2_t @ld2_16b(i8* %A) nounwind {
define %struct.__neon_int8x16x3_t @ld3_16b(i8* %A) nounwind {
; CHECK-LABEL: ld3_16b
; Make sure we are using the operands defined by the ABI
-; CHECK ld3.16b { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.16b { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
ret %struct.__neon_int8x16x3_t %tmp2
}
@@ -61,8 +61,8 @@ define %struct.__neon_int8x16x3_t @ld3_16b(i8* %A) nounwind {
define %struct.__neon_int8x16x4_t @ld4_16b(i8* %A) nounwind {
; CHECK-LABEL: ld4_16b
; Make sure we are using the operands defined by the ABI
-; CHECK ld4.16b { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.16b { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
ret %struct.__neon_int8x16x4_t %tmp2
}
@@ -78,8 +78,8 @@ declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*) nounwi
define %struct.__neon_int16x4x2_t @ld2_4h(i16* %A) nounwind {
; CHECK-LABEL: ld2_4h
; Make sure we are using the operands defined by the ABI
-; CHECK ld2.4h { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.4h { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
ret %struct.__neon_int16x4x2_t %tmp2
}
@@ -87,8 +87,8 @@ define %struct.__neon_int16x4x2_t @ld2_4h(i16* %A) nounwind {
define %struct.__neon_int16x4x3_t @ld3_4h(i16* %A) nounwind {
; CHECK-LABEL: ld3_4h
; Make sure we are using the operands defined by the ABI
-; CHECK ld3.4h { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.4h { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
ret %struct.__neon_int16x4x3_t %tmp2
}
@@ -96,8 +96,8 @@ define %struct.__neon_int16x4x3_t @ld3_4h(i16* %A) nounwind {
define %struct.__neon_int16x4x4_t @ld4_4h(i16* %A) nounwind {
; CHECK-LABEL: ld4_4h
; Make sure we are using the operands defined by the ABI
-; CHECK ld4.4h { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.4h { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
ret %struct.__neon_int16x4x4_t %tmp2
}
@@ -113,8 +113,8 @@ declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*) noun
define %struct.__neon_int16x8x2_t @ld2_8h(i16* %A) nounwind {
; CHECK-LABEL: ld2_8h
; Make sure we are using the operands defined by the ABI
-; CHECK ld2.8h { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.8h { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
ret %struct.__neon_int16x8x2_t %tmp2
}
@@ -122,8 +122,8 @@ define %struct.__neon_int16x8x2_t @ld2_8h(i16* %A) nounwind {
define %struct.__neon_int16x8x3_t @ld3_8h(i16* %A) nounwind {
; CHECK-LABEL: ld3_8h
; Make sure we are using the operands defined by the ABI
-; CHECK ld3.8h { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.8h { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
ret %struct.__neon_int16x8x3_t %tmp2
}
@@ -131,8 +131,8 @@ define %struct.__neon_int16x8x3_t @ld3_8h(i16* %A) nounwind {
define %struct.__neon_int16x8x4_t @ld4_8h(i16* %A) nounwind {
; CHECK-LABEL: ld4_8h
; Make sure we are using the operands defined by the ABI
-; CHECK ld4.8h { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.8h { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
ret %struct.__neon_int16x8x4_t %tmp2
}
@@ -148,8 +148,8 @@ declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0i16(i16*) noun
define %struct.__neon_int32x2x2_t @ld2_2s(i32* %A) nounwind {
; CHECK-LABEL: ld2_2s
; Make sure we are using the operands defined by the ABI
-; CHECK ld2.2s { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.2s { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
ret %struct.__neon_int32x2x2_t %tmp2
}
@@ -157,8 +157,8 @@ define %struct.__neon_int32x2x2_t @ld2_2s(i32* %A) nounwind {
define %struct.__neon_int32x2x3_t @ld3_2s(i32* %A) nounwind {
; CHECK-LABEL: ld3_2s
; Make sure we are using the operands defined by the ABI
-; CHECK ld3.2s { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.2s { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
ret %struct.__neon_int32x2x3_t %tmp2
}
@@ -166,8 +166,8 @@ define %struct.__neon_int32x2x3_t @ld3_2s(i32* %A) nounwind {
define %struct.__neon_int32x2x4_t @ld4_2s(i32* %A) nounwind {
; CHECK-LABEL: ld4_2s
; Make sure we are using the operands defined by the ABI
-; CHECK ld4.2s { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.2s { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
ret %struct.__neon_int32x2x4_t %tmp2
}
@@ -183,8 +183,8 @@ declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0i32(i32*) noun
define %struct.__neon_int32x4x2_t @ld2_4s(i32* %A) nounwind {
; CHECK-LABEL: ld2_4s
; Make sure we are using the operands defined by the ABI
-; CHECK ld2.4s { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.4s { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
ret %struct.__neon_int32x4x2_t %tmp2
}
@@ -192,8 +192,8 @@ define %struct.__neon_int32x4x2_t @ld2_4s(i32* %A) nounwind {
define %struct.__neon_int32x4x3_t @ld3_4s(i32* %A) nounwind {
; CHECK-LABEL: ld3_4s
; Make sure we are using the operands defined by the ABI
-; CHECK ld3.4s { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.4s { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
ret %struct.__neon_int32x4x3_t %tmp2
}
@@ -201,8 +201,8 @@ define %struct.__neon_int32x4x3_t @ld3_4s(i32* %A) nounwind {
define %struct.__neon_int32x4x4_t @ld4_4s(i32* %A) nounwind {
; CHECK-LABEL: ld4_4s
; Make sure we are using the operands defined by the ABI
-; CHECK ld4.4s { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.4s { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
ret %struct.__neon_int32x4x4_t %tmp2
}
@@ -218,8 +218,8 @@ declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0i32(i32*) noun
define %struct.__neon_int64x2x2_t @ld2_2d(i64* %A) nounwind {
; CHECK-LABEL: ld2_2d
; Make sure we are using the operands defined by the ABI
-; CHECK ld2.2d { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.2d { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
ret %struct.__neon_int64x2x2_t %tmp2
}
@@ -227,8 +227,8 @@ define %struct.__neon_int64x2x2_t @ld2_2d(i64* %A) nounwind {
define %struct.__neon_int64x2x3_t @ld3_2d(i64* %A) nounwind {
; CHECK-LABEL: ld3_2d
; Make sure we are using the operands defined by the ABI
-; CHECK ld3.2d { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.2d { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
ret %struct.__neon_int64x2x3_t %tmp2
}
@@ -236,8 +236,8 @@ define %struct.__neon_int64x2x3_t @ld3_2d(i64* %A) nounwind {
define %struct.__neon_int64x2x4_t @ld4_2d(i64* %A) nounwind {
; CHECK-LABEL: ld4_2d
; Make sure we are using the operands defined by the ABI
-; CHECK ld4.2d { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.2d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
ret %struct.__neon_int64x2x4_t %tmp2
}
@@ -254,8 +254,8 @@ declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0i64(i64*) noun
define %struct.__neon_int64x1x2_t @ld2_1di64(i64* %A) nounwind {
; CHECK-LABEL: ld2_1di64
; Make sure we are using the operands defined by the ABI
-; CHECK ld1.1d { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld1.1d { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
ret %struct.__neon_int64x1x2_t %tmp2
}
@@ -263,8 +263,8 @@ define %struct.__neon_int64x1x2_t @ld2_1di64(i64* %A) nounwind {
define %struct.__neon_int64x1x3_t @ld3_1di64(i64* %A) nounwind {
; CHECK-LABEL: ld3_1di64
; Make sure we are using the operands defined by the ABI
-; CHECK ld1.1d { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld1.1d { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
ret %struct.__neon_int64x1x3_t %tmp2
}
@@ -272,8 +272,8 @@ define %struct.__neon_int64x1x3_t @ld3_1di64(i64* %A) nounwind {
define %struct.__neon_int64x1x4_t @ld4_1di64(i64* %A) nounwind {
; CHECK-LABEL: ld4_1di64
; Make sure we are using the operands defined by the ABI
-; CHECK ld1.1d { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld1.1d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
ret %struct.__neon_int64x1x4_t %tmp2
}
@@ -291,8 +291,8 @@ declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0i64(i64*) noun
define %struct.__neon_float64x1x2_t @ld2_1df64(double* %A) nounwind {
; CHECK-LABEL: ld2_1df64
; Make sure we are using the operands defined by the ABI
-; CHECK ld1.1d { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld1.1d { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
ret %struct.__neon_float64x1x2_t %tmp2
}
@@ -300,8 +300,8 @@ define %struct.__neon_float64x1x2_t @ld2_1df64(double* %A) nounwind {
define %struct.__neon_float64x1x3_t @ld3_1df64(double* %A) nounwind {
; CHECK-LABEL: ld3_1df64
; Make sure we are using the operands defined by the ABI
-; CHECK ld1.1d { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld1.1d { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
ret %struct.__neon_float64x1x3_t %tmp2
}
@@ -309,8 +309,8 @@ define %struct.__neon_float64x1x3_t @ld3_1df64(double* %A) nounwind {
define %struct.__neon_float64x1x4_t @ld4_1df64(double* %A) nounwind {
; CHECK-LABEL: ld4_1df64
; Make sure we are using the operands defined by the ABI
-; CHECK ld1.1d { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld1.1d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
ret %struct.__neon_float64x1x4_t %tmp2
}
@@ -323,8 +323,8 @@ declare %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0f64(double*)
define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2lane_16b
-; CHECK ld2.b { v0, v1 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.b { v0, v1 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, i64 1, i8* %A)
ret %struct.__neon_int8x16x2_t %tmp2
}
@@ -332,8 +332,8 @@ define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, i8*
define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3lane_16b
-; CHECK ld3.b { v0, v1, v2 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.b { v0, v1, v2 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i64 1, i8* %A)
ret %struct.__neon_int8x16x3_t %tmp2
}
@@ -341,8 +341,8 @@ define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16
define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4lane_16b
-; CHECK ld4.b { v0, v1, v2, v3 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.b { v0, v1, v2, v3 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i64 1, i8* %A)
ret %struct.__neon_int8x16x4_t %tmp2
}
@@ -354,8 +354,8 @@ declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i
define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2lane_8h
-; CHECK ld2.h { v0, v1 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.h { v0, v1 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, i64 1, i16* %A)
ret %struct.__neon_int16x8x2_t %tmp2
}
@@ -363,8 +363,8 @@ define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, i16*
define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3lane_8h
-; CHECK ld3.h { v0, v1, v3 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.h { v0, v1, v2 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i64 1, i16* %A)
ret %struct.__neon_int16x8x3_t %tmp2
}
@@ -372,8 +372,8 @@ define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x
define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4lane_8h
-; CHECK ld4.h { v0, v1, v2, v3 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.h { v0, v1, v2, v3 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i64 1, i16* %A)
ret %struct.__neon_int16x8x4_t %tmp2
}
@@ -385,8 +385,8 @@ declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i
define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2lane_4s
-; CHECK ld2.s { v0, v1 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.s { v0, v1 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, i64 1, i32* %A)
ret %struct.__neon_int32x4x2_t %tmp2
}
@@ -394,8 +394,8 @@ define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, i32*
define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3lane_4s
-; CHECK ld3.s { v0, v1, v2 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.s { v0, v1, v2 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i64 1, i32* %A)
ret %struct.__neon_int32x4x3_t %tmp2
}
@@ -403,8 +403,8 @@ define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x
define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4lane_4s
-; CHECK ld4.s { v0, v1, v2, v3 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.s { v0, v1, v2, v3 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i64 1, i32* %A)
ret %struct.__neon_int32x4x4_t %tmp2
}
@@ -416,8 +416,8 @@ declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i
define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2lane_2d
-; CHECK ld2.d { v0, v1 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld2.d { v0, v1 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, i64 1, i64* %A)
ret %struct.__neon_int64x2x2_t %tmp2
}
@@ -425,8 +425,8 @@ define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, i64*
define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3lane_2d
-; CHECK ld3.d { v0, v1, v3 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld3.d { v0, v1, v2 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64 1, i64* %A)
ret %struct.__neon_int64x2x3_t %tmp2
}
@@ -434,8 +434,8 @@ define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x
define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4lane_2d
-; CHECK ld4.d { v0, v1, v2, v3 }[1], [x0]
-; CHECK-NEXT ret
+; CHECK: ld4.d { v0, v1, v2, v3 }[1], [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64 1, i64* %A)
ret %struct.__neon_int64x2x4_t %tmp2
}
@@ -448,8 +448,8 @@ define <8 x i8> @ld1r_8b(i8* %bar) {
; CHECK: ld1r_8b
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.8b { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp1 = load i8* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
%tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
%tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
@@ -465,8 +465,8 @@ define <16 x i8> @ld1r_16b(i8* %bar) {
; CHECK: ld1r_16b
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.16b { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp1 = load i8* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
%tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
%tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
@@ -490,8 +490,8 @@ define <4 x i16> @ld1r_4h(i16* %bar) {
; CHECK: ld1r_4h
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4h { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp1 = load i16* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
%tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
@@ -503,8 +503,8 @@ define <8 x i16> @ld1r_8h(i16* %bar) {
; CHECK: ld1r_8h
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.8h { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp1 = load i16* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
%tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
@@ -520,8 +520,8 @@ define <2 x i32> @ld1r_2s(i32* %bar) {
; CHECK: ld1r_2s
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp1 = load i32* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
ret <2 x i32> %tmp3
@@ -531,8 +531,8 @@ define <4 x i32> @ld1r_4s(i32* %bar) {
; CHECK: ld1r_4s
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp1 = load i32* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
%tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
@@ -544,8 +544,8 @@ define <2 x i64> @ld1r_2d(i64* %bar) {
; CHECK: ld1r_2d
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp1 = load i64* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i64, i64* %bar
%tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
%tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
ret <2 x i64> %tmp3
@@ -554,8 +554,8 @@ define <2 x i64> @ld1r_2d(i64* %bar) {
define %struct.__neon_int8x8x2_t @ld2r_8b(i8* %A) nounwind {
; CHECK: ld2r_8b
; Make sure we are using the operands defined by the ABI
-; CHECK ld2r.8b { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2r.8b { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
ret %struct.__neon_int8x8x2_t %tmp2
}
@@ -563,8 +563,8 @@ define %struct.__neon_int8x8x2_t @ld2r_8b(i8* %A) nounwind {
define %struct.__neon_int8x8x3_t @ld3r_8b(i8* %A) nounwind {
; CHECK: ld3r_8b
; Make sure we are using the operands defined by the ABI
-; CHECK ld3r.8b { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3r.8b { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
ret %struct.__neon_int8x8x3_t %tmp2
}
@@ -572,8 +572,8 @@ define %struct.__neon_int8x8x3_t @ld3r_8b(i8* %A) nounwind {
define %struct.__neon_int8x8x4_t @ld4r_8b(i8* %A) nounwind {
; CHECK: ld4r_8b
; Make sure we are using the operands defined by the ABI
-; CHECK ld4r.8b { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4r.8b { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
ret %struct.__neon_int8x8x4_t %tmp2
}
@@ -585,8 +585,8 @@ declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8*) nounwin
define %struct.__neon_int8x16x2_t @ld2r_16b(i8* %A) nounwind {
; CHECK: ld2r_16b
; Make sure we are using the operands defined by the ABI
-; CHECK ld2r.16b { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2r.16b { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
ret %struct.__neon_int8x16x2_t %tmp2
}
@@ -594,8 +594,8 @@ define %struct.__neon_int8x16x2_t @ld2r_16b(i8* %A) nounwind {
define %struct.__neon_int8x16x3_t @ld3r_16b(i8* %A) nounwind {
; CHECK: ld3r_16b
; Make sure we are using the operands defined by the ABI
-; CHECK ld3r.16b { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3r.16b { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
ret %struct.__neon_int8x16x3_t %tmp2
}
@@ -603,8 +603,8 @@ define %struct.__neon_int8x16x3_t @ld3r_16b(i8* %A) nounwind {
define %struct.__neon_int8x16x4_t @ld4r_16b(i8* %A) nounwind {
; CHECK: ld4r_16b
; Make sure we are using the operands defined by the ABI
-; CHECK ld4r.16b { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4r.16b { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
ret %struct.__neon_int8x16x4_t %tmp2
}
@@ -616,8 +616,8 @@ declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8*) nounw
define %struct.__neon_int16x4x2_t @ld2r_4h(i16* %A) nounwind {
; CHECK: ld2r_4h
; Make sure we are using the operands defined by the ABI
-; CHECK ld2r.4h { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2r.4h { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
ret %struct.__neon_int16x4x2_t %tmp2
}
@@ -625,8 +625,8 @@ define %struct.__neon_int16x4x2_t @ld2r_4h(i16* %A) nounwind {
define %struct.__neon_int16x4x3_t @ld3r_4h(i16* %A) nounwind {
; CHECK: ld3r_4h
; Make sure we are using the operands defined by the ABI
-; CHECK ld3r.4h { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3r.4h { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
ret %struct.__neon_int16x4x3_t %tmp2
}
@@ -634,8 +634,8 @@ define %struct.__neon_int16x4x3_t @ld3r_4h(i16* %A) nounwind {
define %struct.__neon_int16x4x4_t @ld4r_4h(i16* %A) nounwind {
; CHECK: ld4r_4h
; Make sure we are using the operands defined by the ABI
-; CHECK ld4r.4h { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4r.4h { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
ret %struct.__neon_int16x4x4_t %tmp2
}
@@ -647,8 +647,8 @@ declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16*) nou
define %struct.__neon_int16x8x2_t @ld2r_8h(i16* %A) nounwind {
; CHECK: ld2r_8h
; Make sure we are using the operands defined by the ABI
-; CHECK ld2r.8h { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2r.8h { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
ret %struct.__neon_int16x8x2_t %tmp2
}
@@ -656,8 +656,8 @@ define %struct.__neon_int16x8x2_t @ld2r_8h(i16* %A) nounwind {
define %struct.__neon_int16x8x3_t @ld3r_8h(i16* %A) nounwind {
; CHECK: ld3r_8h
; Make sure we are using the operands defined by the ABI
-; CHECK ld3r.8h { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3r.8h { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
ret %struct.__neon_int16x8x3_t %tmp2
}
@@ -665,8 +665,8 @@ define %struct.__neon_int16x8x3_t @ld3r_8h(i16* %A) nounwind {
define %struct.__neon_int16x8x4_t @ld4r_8h(i16* %A) nounwind {
; CHECK: ld4r_8h
; Make sure we are using the operands defined by the ABI
-; CHECK ld4r.8h { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4r.8h { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
ret %struct.__neon_int16x8x4_t %tmp2
}
@@ -678,8 +678,8 @@ declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16*) nou
define %struct.__neon_int32x2x2_t @ld2r_2s(i32* %A) nounwind {
; CHECK: ld2r_2s
; Make sure we are using the operands defined by the ABI
-; CHECK ld2r.2s { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2r.2s { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
ret %struct.__neon_int32x2x2_t %tmp2
}
@@ -687,8 +687,8 @@ define %struct.__neon_int32x2x2_t @ld2r_2s(i32* %A) nounwind {
define %struct.__neon_int32x2x3_t @ld3r_2s(i32* %A) nounwind {
; CHECK: ld3r_2s
; Make sure we are using the operands defined by the ABI
-; CHECK ld3r.2s { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3r.2s { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
ret %struct.__neon_int32x2x3_t %tmp2
}
@@ -696,8 +696,8 @@ define %struct.__neon_int32x2x3_t @ld3r_2s(i32* %A) nounwind {
define %struct.__neon_int32x2x4_t @ld4r_2s(i32* %A) nounwind {
; CHECK: ld4r_2s
; Make sure we are using the operands defined by the ABI
-; CHECK ld4r.2s { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
ret %struct.__neon_int32x2x4_t %tmp2
}
@@ -709,8 +709,8 @@ declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32*) nou
define %struct.__neon_int32x4x2_t @ld2r_4s(i32* %A) nounwind {
; CHECK: ld2r_4s
; Make sure we are using the operands defined by the ABI
-; CHECK ld2r.4s { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2r.4s { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
ret %struct.__neon_int32x4x2_t %tmp2
}
@@ -718,8 +718,8 @@ define %struct.__neon_int32x4x2_t @ld2r_4s(i32* %A) nounwind {
define %struct.__neon_int32x4x3_t @ld3r_4s(i32* %A) nounwind {
; CHECK: ld3r_4s
; Make sure we are using the operands defined by the ABI
-; CHECK ld3r.4s { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3r.4s { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
ret %struct.__neon_int32x4x3_t %tmp2
}
@@ -727,8 +727,8 @@ define %struct.__neon_int32x4x3_t @ld3r_4s(i32* %A) nounwind {
define %struct.__neon_int32x4x4_t @ld4r_4s(i32* %A) nounwind {
; CHECK: ld4r_4s
; Make sure we are using the operands defined by the ABI
-; CHECK ld4r.4s { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
ret %struct.__neon_int32x4x4_t %tmp2
}
@@ -740,8 +740,8 @@ declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32*) nou
define %struct.__neon_int64x1x2_t @ld2r_1d(i64* %A) nounwind {
; CHECK: ld2r_1d
; Make sure we are using the operands defined by the ABI
-; CHECK ld2r.1d { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2r.1d { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
ret %struct.__neon_int64x1x2_t %tmp2
}
@@ -749,8 +749,8 @@ define %struct.__neon_int64x1x2_t @ld2r_1d(i64* %A) nounwind {
define %struct.__neon_int64x1x3_t @ld3r_1d(i64* %A) nounwind {
; CHECK: ld3r_1d
; Make sure we are using the operands defined by the ABI
-; CHECK ld3r.1d { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3r.1d { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
ret %struct.__neon_int64x1x3_t %tmp2
}
@@ -758,8 +758,8 @@ define %struct.__neon_int64x1x3_t @ld3r_1d(i64* %A) nounwind {
define %struct.__neon_int64x1x4_t @ld4r_1d(i64* %A) nounwind {
; CHECK: ld4r_1d
; Make sure we are using the operands defined by the ABI
-; CHECK ld4r.1d { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
ret %struct.__neon_int64x1x4_t %tmp2
}
@@ -771,8 +771,8 @@ declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64*) nou
define %struct.__neon_int64x2x2_t @ld2r_2d(i64* %A) nounwind {
; CHECK: ld2r_2d
; Make sure we are using the operands defined by the ABI
-; CHECK ld2r.2d { v0, v1 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld2r.2d { v0, v1 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
ret %struct.__neon_int64x2x2_t %tmp2
}
@@ -780,8 +780,8 @@ define %struct.__neon_int64x2x2_t @ld2r_2d(i64* %A) nounwind {
define %struct.__neon_int64x2x3_t @ld3r_2d(i64* %A) nounwind {
; CHECK: ld3r_2d
; Make sure we are using the operands defined by the ABI
-; CHECK ld3r.2d { v0, v1, v2 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld3r.2d { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
ret %struct.__neon_int64x2x3_t %tmp2
}
@@ -789,8 +789,8 @@ define %struct.__neon_int64x2x3_t @ld3r_2d(i64* %A) nounwind {
define %struct.__neon_int64x2x4_t @ld4r_2d(i64* %A) nounwind {
; CHECK: ld4r_2d
; Make sure we are using the operands defined by the ABI
-; CHECK ld4r.2d { v0, v1, v2, v3 }, [x0]
-; CHECK-NEXT ret
+; CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
ret %struct.__neon_int64x2x4_t %tmp2
}
@@ -803,8 +803,8 @@ define <16 x i8> @ld1_16b(<16 x i8> %V, i8* %bar) {
; CHECK-LABEL: ld1_16b
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.b { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load i8* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <16 x i8> %V, i8 %tmp1, i32 0
ret <16 x i8> %tmp2
}
@@ -813,8 +813,8 @@ define <8 x i16> @ld1_8h(<8 x i16> %V, i16* %bar) {
; CHECK-LABEL: ld1_8h
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.h { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load i16* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <8 x i16> %V, i16 %tmp1, i32 0
ret <8 x i16> %tmp2
}
@@ -823,8 +823,8 @@ define <4 x i32> @ld1_4s(<4 x i32> %V, i32* %bar) {
; CHECK-LABEL: ld1_4s
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load i32* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <4 x i32> %V, i32 %tmp1, i32 0
ret <4 x i32> %tmp2
}
@@ -833,8 +833,8 @@ define <4 x float> @ld1_4s_float(<4 x float> %V, float* %bar) {
; CHECK-LABEL: ld1_4s_float:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load float* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <4 x float> %V, float %tmp1, i32 0
ret <4 x float> %tmp2
}
@@ -843,8 +843,8 @@ define <2 x i64> @ld1_2d(<2 x i64> %V, i64* %bar) {
; CHECK-LABEL: ld1_2d
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.d { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load i64* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i64, i64* %bar
%tmp2 = insertelement <2 x i64> %V, i64 %tmp1, i32 0
ret <2 x i64> %tmp2
}
@@ -853,8 +853,8 @@ define <2 x double> @ld1_2d_double(<2 x double> %V, double* %bar) {
; CHECK-LABEL: ld1_2d_double:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.d { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load double* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load double, double* %bar
%tmp2 = insertelement <2 x double> %V, double %tmp1, i32 0
ret <2 x double> %tmp2
}
@@ -864,7 +864,7 @@ define <1 x i64> @ld1_1d(<1 x i64>* %p) {
; Make sure we are using the operands defined by the ABI
; CHECK: ldr [[REG:d[0-9]+]], [x0]
; CHECK-NEXT: ret
- %tmp = load <1 x i64>* %p, align 8
+ %tmp = load <1 x i64>, <1 x i64>* %p, align 8
ret <1 x i64> %tmp
}
@@ -872,8 +872,8 @@ define <8 x i8> @ld1_8b(<8 x i8> %V, i8* %bar) {
; CHECK-LABEL: ld1_8b
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.b { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load i8* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <8 x i8> %V, i8 %tmp1, i32 0
ret <8 x i8> %tmp2
}
@@ -882,8 +882,8 @@ define <4 x i16> @ld1_4h(<4 x i16> %V, i16* %bar) {
; CHECK-LABEL: ld1_4h
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.h { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load i16* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <4 x i16> %V, i16 %tmp1, i32 0
ret <4 x i16> %tmp2
}
@@ -892,8 +892,8 @@ define <2 x i32> @ld1_2s(<2 x i32> %V, i32* %bar) {
; CHECK-LABEL: ld1_2s:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load i32* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <2 x i32> %V, i32 %tmp1, i32 0
ret <2 x i32> %tmp2
}
@@ -902,8 +902,8 @@ define <2 x float> @ld1_2s_float(<2 x float> %V, float* %bar) {
; CHECK-LABEL: ld1_2s_float:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
-; CHECK-NEXT ret
- %tmp1 = load float* %bar
+; CHECK-NEXT: ret
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <2 x float> %V, float %tmp1, i32 0
ret <2 x float> %tmp2
}
@@ -919,12 +919,12 @@ entry:
; CHECK-NEXT: str d[[RESREGNUM]], [x2]
; CHECK-NEXT: ret
%tmp = bitcast i8* %a to i32*
- %tmp1 = load i32* %tmp, align 4
+ %tmp1 = load i32, i32* %tmp, align 4
%tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
%lane = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
%tmp3 = bitcast <2 x i32> %lane to <8 x i8>
%tmp4 = bitcast i8* %b to i32*
- %tmp5 = load i32* %tmp4, align 4
+ %tmp5 = load i32, i32* %tmp4, align 4
%tmp6 = insertelement <2 x i32> undef, i32 %tmp5, i32 0
%lane1 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> zeroinitializer
%tmp7 = bitcast <2 x i32> %lane1 to <8 x i8>
@@ -945,8 +945,8 @@ entry:
; CHECK-LABEL: ld1r_4s_float
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp = load float* %x, align 4
+; CHECK-NEXT: ret
+ %tmp = load float, float* %x, align 4
%tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
%tmp2 = insertelement <4 x float> %tmp1, float %tmp, i32 1
%tmp3 = insertelement <4 x float> %tmp2, float %tmp, i32 2
@@ -959,8 +959,8 @@ entry:
; CHECK-LABEL: ld1r_2s_float
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp = load float* %x, align 4
+; CHECK-NEXT: ret
+ %tmp = load float, float* %x, align 4
%tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
%tmp2 = insertelement <2 x float> %tmp1, float %tmp, i32 1
ret <2 x float> %tmp2
@@ -971,8 +971,8 @@ entry:
; CHECK-LABEL: ld1r_2d_double
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp = load double* %x, align 4
+; CHECK-NEXT: ret
+ %tmp = load double, double* %x, align 4
%tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
%tmp2 = insertelement <2 x double> %tmp1, double %tmp, i32 1
ret <2 x double> %tmp2
@@ -983,8 +983,8 @@ entry:
; CHECK-LABEL: ld1r_1d_double
; Make sure we are using the operands defined by the ABI
; CHECK: ldr d0, [x0]
-; CHECK-NEXT ret
- %tmp = load double* %x, align 4
+; CHECK-NEXT: ret
+ %tmp = load double, double* %x, align 4
%tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
ret <1 x double> %tmp1
}
@@ -994,8 +994,8 @@ entry:
; CHECK-LABEL: ld1r_4s_float_shuff
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp = load float* %x, align 4
+; CHECK-NEXT: ret
+ %tmp = load float, float* %x, align 4
%tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
%lane = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %lane
@@ -1006,8 +1006,8 @@ entry:
; CHECK-LABEL: ld1r_2s_float_shuff
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp = load float* %x, align 4
+; CHECK-NEXT: ret
+ %tmp = load float, float* %x, align 4
%tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
%lane = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
ret <2 x float> %lane
@@ -1018,8 +1018,8 @@ entry:
; CHECK-LABEL: ld1r_2d_double_shuff
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
-; CHECK-NEXT ret
- %tmp = load double* %x, align 4
+; CHECK-NEXT: ret
+ %tmp = load double, double* %x, align 4
%tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
%lane = shufflevector <2 x double> %tmp1, <2 x double> undef, <2 x i32> zeroinitializer
ret <2 x double> %lane
@@ -1030,8 +1030,8 @@ entry:
; CHECK-LABEL: ld1r_1d_double_shuff
; Make sure we are using the operands defined by the ABI
; CHECK: ldr d0, [x0]
-; CHECK-NEXT ret
- %tmp = load double* %x, align 4
+; CHECK-NEXT: ret
+ %tmp = load double, double* %x, align 4
%tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
%lane = shufflevector <1 x double> %tmp1, <1 x double> undef, <1 x i32> zeroinitializer
ret <1 x double> %lane