Diffstat (limited to 'test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll')
-rw-r--r--  test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll | 58
1 file changed, 29 insertions(+), 29 deletions(-)
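
Every hunk below applies the same mechanical update to the test's IR: the LLVM `load` instruction is written with an explicit result type before the pointer operand, as the current IR syntax requires. A minimal sketch of the old and new forms, using a hypothetical pointer %p rather than a value from this test:

    ; old syntax: the result type was inferred from the pointer's pointee type
    %v = load i64* %p, align 8
    ; new syntax: the result type is stated explicitly, followed by the pointer operand
    %v = load i64, i64* %p, align 8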
diff --git a/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
index 64d91ee..2170e4b 100644
--- a/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
+++ b/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
@@ -22,7 +22,7 @@ target triple = "aarch64--linux-gnu"
define i64 @f_load_madd_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
%mul = mul nsw i64 %0, %b
%add = add nsw i64 %mul, %a
ret i64 %add
@@ -41,7 +41,7 @@ entry:
define i32 @f_load_madd_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%mul = mul nsw i32 %0, %b
%add = add nsw i32 %mul, %a
ret i32 %add
@@ -56,7 +56,7 @@ entry:
define i64 @f_load_msub_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
%mul = mul nsw i64 %0, %b
%sub = sub nsw i64 %a, %mul
ret i64 %sub
@@ -72,7 +72,7 @@ entry:
define i32 @f_load_msub_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%mul = mul nsw i32 %0, %b
%sub = sub nsw i32 %a, %mul
ret i32 %sub
@@ -87,7 +87,7 @@ entry:
define i64 @f_load_mul_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
%mul = mul nsw i64 %0, %b
ret i64 %mul
}
@@ -101,7 +101,7 @@ entry:
define i32 @f_load_mul_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%mul = mul nsw i32 %0, %b
ret i32 %mul
}
@@ -115,7 +115,7 @@ entry:
define i64 @f_load_mneg_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
%mul = sub i64 0, %b
%sub = mul i64 %0, %mul
ret i64 %sub
@@ -133,7 +133,7 @@ entry:
define i32 @f_load_mneg_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%mul = sub i32 0, %b
%sub = mul i32 %0, %mul
ret i32 %sub
@@ -154,7 +154,7 @@ entry:
%conv1 = sext i32 %c to i64
%mul = mul nsw i64 %conv1, %conv
%add = add nsw i64 %mul, %a
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = sext i32 %0 to i64
%add3 = add nsw i64 %add, %conv2
ret i64 %add3
@@ -174,7 +174,7 @@ entry:
%conv1 = sext i32 %c to i64
%mul = mul nsw i64 %conv1, %conv
%sub = sub i64 %a, %mul
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = sext i32 %0 to i64
%add = add nsw i64 %sub, %conv2
ret i64 %add
@@ -193,7 +193,7 @@ entry:
%conv = sext i32 %b to i64
%conv1 = sext i32 %c to i64
%mul = mul nsw i64 %conv1, %conv
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = sext i32 %0 to i64
%div = sdiv i64 %mul, %conv2
ret i64 %div
@@ -212,7 +212,7 @@ entry:
%conv1 = sext i32 %c to i64
%mul = sub nsw i64 0, %conv
%sub = mul i64 %conv1, %mul
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = sext i32 %0 to i64
%div = sdiv i64 %sub, %conv2
ret i64 %div
@@ -229,7 +229,7 @@ entry:
%conv1 = zext i32 %c to i64
%mul = mul i64 %conv1, %conv
%add = add i64 %mul, %a
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = zext i32 %0 to i64
%add3 = add i64 %add, %conv2
ret i64 %add3
@@ -249,7 +249,7 @@ entry:
%conv1 = zext i32 %c to i64
%mul = mul i64 %conv1, %conv
%sub = sub i64 %a, %mul
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = zext i32 %0 to i64
%add = add i64 %sub, %conv2
ret i64 %add
@@ -268,7 +268,7 @@ entry:
%conv = zext i32 %b to i64
%conv1 = zext i32 %c to i64
%mul = mul i64 %conv1, %conv
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = zext i32 %0 to i64
%div = udiv i64 %mul, %conv2
ret i64 %div
@@ -287,7 +287,7 @@ entry:
%conv1 = zext i32 %c to i64
%mul = sub nsw i64 0, %conv
%sub = mul i64 %conv1, %mul
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = zext i32 %0 to i64
%div = udiv i64 %sub, %conv2
ret i64 %div
@@ -300,7 +300,7 @@ entry:
define i64 @f_store_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
store i64 %a, i64* %e, align 8
%mul = mul nsw i64 %0, %b
%add = add nsw i64 %mul, %a
@@ -317,7 +317,7 @@ entry:
define i32 @f_store_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
store i32 %a, i32* %e, align 4
%mul = mul nsw i32 %0, %b
%add = add nsw i32 %mul, %a
@@ -333,7 +333,7 @@ entry:
define i64 @f_store_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
store i64 %a, i64* %e, align 8
%mul = mul nsw i64 %0, %b
%sub = sub nsw i64 %a, %mul
@@ -350,7 +350,7 @@ entry:
define i32 @f_store_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
store i32 %a, i32* %e, align 4
%mul = mul nsw i32 %0, %b
%sub = sub nsw i32 %a, %mul
@@ -366,7 +366,7 @@ entry:
define i64 @f_store_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
store i64 %a, i64* %e, align 8
%mul = mul nsw i64 %0, %b
ret i64 %mul
@@ -381,7 +381,7 @@ entry:
define i32 @f_store_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
store i32 %a, i32* %e, align 4
%mul = mul nsw i32 %0, %b
ret i32 %mul
@@ -396,7 +396,7 @@ entry:
define i64 @f_prefetch_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
%1 = bitcast i64* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
%mul = mul nsw i64 %0, %b
@@ -415,7 +415,7 @@ declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) #2
define i32 @f_prefetch_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
%1 = bitcast i32* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 1, i32 0, i32 1)
%mul = mul nsw i32 %0, %b
@@ -431,7 +431,7 @@ entry:
define i64 @f_prefetch_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
%1 = bitcast i64* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 0, i32 1, i32 1)
%mul = mul nsw i64 %0, %b
@@ -448,7 +448,7 @@ entry:
define i32 @f_prefetch_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
%1 = bitcast i32* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 1, i32 1, i32 1)
%mul = mul nsw i32 %0, %b
@@ -464,7 +464,7 @@ entry:
define i64 @f_prefetch_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
%1 = bitcast i64* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
%mul = mul nsw i64 %0, %b
@@ -479,7 +479,7 @@ entry:
define i32 @f_prefetch_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
%1 = bitcast i32* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 1, i32 3, i32 1)
%mul = mul nsw i32 %0, %b
@@ -494,7 +494,7 @@ entry:
define i64 @fall_through(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
br label %block1
block1: