author     Richard Sandiford <rsandifo@linux.vnet.ibm.com>  2013-05-31 13:25:22 +0000
committer  Richard Sandiford <rsandifo@linux.vnet.ibm.com>  2013-05-31 13:25:22 +0000
commit     b6606e46abad12a112a57048caec2142522bc67d
tree       1f4b77d559242b7a3b7a40cd31bf7726206e71bc
parent     5443e7d79044f3198f2da044f1b389b40d9bea6f
[SystemZ] Don't use LOAD and STORE REVERSED for volatile accesses
Unlike most -- hopefully "all other", but I'm still checking -- memory
instructions we support, LOAD REVERSED and STORE REVERSED may access the
memory location several times.  This means that they are not suitable
for volatile loads and stores.

This patch is a prerequisite for better atomic load and store support.
The same principle applies there: almost all memory instructions we
support are inherently atomic ("block concurrent"), but LOAD REVERSED
and STORE REVERSED are exceptions.

Other instructions continue to allow volatile operands.  I will add
positive "allows volatile" tests at the same time as the "allows atomic
load or store" tests.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@183002 91177308-0d34-0410-b5e6-96231b3b80d8
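The backend change itself is not part of this test-only diff.  A common way to enforce a restriction like this is to gate the LRV/LRVG and STRV/STRVG selection patterns on a predicate that rejects volatile accesses.  The TableGen sketch below is an illustration of that approach under that assumption, not necessarily the exact code in the patch:

// Hypothetical PatFrags that match only non-volatile loads and stores.
// Patterns written in terms of these leave volatile accesses to the
// ordinary load/store instructions plus LRVR/LRVGR register byte swaps,
// which is what the new tests below check for.
def nonvolatile_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return !cast<LoadSDNode>(N)->isVolatile();
}]>;
def nonvolatile_store : PatFrag<(ops node:$src, node:$ptr),
                                (store node:$src, node:$ptr), [{
  return !cast<StoreSDNode>(N)->isVolatile();
}]>;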
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/SystemZ/bswap-02.ll  12
-rw-r--r--  test/CodeGen/SystemZ/bswap-03.ll  12
-rw-r--r--  test/CodeGen/SystemZ/bswap-04.ll  36
-rw-r--r--  test/CodeGen/SystemZ/bswap-05.ll  36
4 files changed, 72 insertions, 24 deletions
diff --git a/test/CodeGen/SystemZ/bswap-02.ll b/test/CodeGen/SystemZ/bswap-02.ll
index e9b7eb5..8b99077 100644
--- a/test/CodeGen/SystemZ/bswap-02.ll
+++ b/test/CodeGen/SystemZ/bswap-02.ll
@@ -85,3 +85,15 @@ define i32 @f7(i64 %src, i64 %index) {
%swapped = call i32 @llvm.bswap.i32(i32 %a)
ret i32 %swapped
}
+
+; Check that volatile accesses do not use LRV, which might access the
+; storage multiple times.
+define i32 @f8(i32 *%src) {
+; CHECK: f8:
+; CHECK: l [[REG:%r[0-5]]], 0(%r2)
+; CHECK: lrvr %r2, [[REG]]
+; CHECK: br %r14
+ %a = load volatile i32 *%src
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
diff --git a/test/CodeGen/SystemZ/bswap-03.ll b/test/CodeGen/SystemZ/bswap-03.ll
index 2e6bcdc..df6624e 100644
--- a/test/CodeGen/SystemZ/bswap-03.ll
+++ b/test/CodeGen/SystemZ/bswap-03.ll
@@ -85,3 +85,15 @@ define i64 @f7(i64 %src, i64 %index) {
%swapped = call i64 @llvm.bswap.i64(i64 %a)
ret i64 %swapped
}
+
+; Check that volatile accesses do not use LRVG, which might access the
+; storage multiple times.
+define i64 @f8(i64 *%src) {
+; CHECK: f8:
+; CHECK: lg [[REG:%r[0-5]]], 0(%r2)
+; CHECK: lrvgr %r2, [[REG]]
+; CHECK: br %r14
+ %a = load volatile i64 *%src
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
diff --git a/test/CodeGen/SystemZ/bswap-04.ll b/test/CodeGen/SystemZ/bswap-04.ll
index 192327b..63b2020 100644
--- a/test/CodeGen/SystemZ/bswap-04.ll
+++ b/test/CodeGen/SystemZ/bswap-04.ll
@@ -5,21 +5,21 @@
declare i32 @llvm.bswap.i32(i32 %a)
; Check STRV with no displacement.
-define void @f1(i32 *%src, i32 %a) {
+define void @f1(i32 *%dst, i32 %a) {
; CHECK: f1:
; CHECK: strv %r3, 0(%r2)
; CHECK: br %r14
%swapped = call i32 @llvm.bswap.i32(i32 %a)
- store i32 %swapped, i32 *%src
+ store i32 %swapped, i32 *%dst
ret void
}
; Check the high end of the aligned STRV range.
-define void @f2(i32 *%src, i32 %a) {
+define void @f2(i32 *%dst, i32 %a) {
; CHECK: f2:
; CHECK: strv %r3, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32 *%dst, i64 131071
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
@@ -27,34 +27,34 @@ define void @f2(i32 *%src, i32 %a) {
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define void @f3(i32 *%src, i32 %a) {
+define void @f3(i32 *%dst, i32 %a) {
; CHECK: f3:
; CHECK: agfi %r2, 524288
; CHECK: strv %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32 *%dst, i64 131072
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
}
; Check the high end of the negative aligned STRV range.
-define void @f4(i32 *%src, i32 %a) {
+define void @f4(i32 *%dst, i32 %a) {
; CHECK: f4:
; CHECK: strv %r3, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32 *%dst, i64 -1
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
}
; Check the low end of the STRV range.
-define void @f5(i32 *%src, i32 %a) {
+define void @f5(i32 *%dst, i32 %a) {
; CHECK: f5:
; CHECK: strv %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32 *%dst, i64 -131072
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
@@ -62,12 +62,12 @@ define void @f5(i32 *%src, i32 %a) {
; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
-define void @f6(i32 *%src, i32 %a) {
+define void @f6(i32 *%dst, i32 %a) {
; CHECK: f6:
; CHECK: agfi %r2, -524292
; CHECK: strv %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32 *%dst, i64 -131073
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
@@ -85,3 +85,15 @@ define void @f7(i64 %src, i64 %index, i32 %a) {
store i32 %swapped, i32 *%ptr
ret void
}
+
+; Check that volatile stores do not use STRV, which might access the
+; storage multiple times.
+define void @f8(i32 *%dst, i32 %a) {
+; CHECK: f8:
+; CHECK: lrvr [[REG:%r[0-5]]], %r3
+; CHECK: st [[REG]], 0(%r2)
+; CHECK: br %r14
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ store volatile i32 %swapped, i32 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/bswap-05.ll b/test/CodeGen/SystemZ/bswap-05.ll
index e58cb80..6f25d3c 100644
--- a/test/CodeGen/SystemZ/bswap-05.ll
+++ b/test/CodeGen/SystemZ/bswap-05.ll
@@ -5,21 +5,21 @@
declare i64 @llvm.bswap.i64(i64 %a)
; Check STRVG with no displacement.
-define void @f1(i64 *%src, i64 %a) {
+define void @f1(i64 *%dst, i64 %a) {
; CHECK: f1:
; CHECK: strvg %r3, 0(%r2)
; CHECK: br %r14
%swapped = call i64 @llvm.bswap.i64(i64 %a)
- store i64 %swapped, i64 *%src
+ store i64 %swapped, i64 *%dst
ret void
}
; Check the high end of the aligned STRVG range.
-define void @f2(i64 *%src, i64 %a) {
+define void @f2(i64 *%dst, i64 %a) {
; CHECK: f2:
; CHECK: strvg %r3, 524280(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64 *%dst, i64 65535
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
@@ -27,34 +27,34 @@ define void @f2(i64 *%src, i64 %a) {
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define void @f3(i64 *%src, i64 %a) {
+define void @f3(i64 *%dst, i64 %a) {
; CHECK: f3:
; CHECK: agfi %r2, 524288
; CHECK: strvg %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64 *%dst, i64 65536
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
}
; Check the high end of the negative aligned STRVG range.
-define void @f4(i64 *%src, i64 %a) {
+define void @f4(i64 *%dst, i64 %a) {
; CHECK: f4:
; CHECK: strvg %r3, -8(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64 *%dst, i64 -1
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
}
; Check the low end of the STRVG range.
-define void @f5(i64 *%src, i64 %a) {
+define void @f5(i64 *%dst, i64 %a) {
; CHECK: f5:
; CHECK: strvg %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64 *%dst, i64 -65536
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
@@ -62,12 +62,12 @@ define void @f5(i64 *%src, i64 %a) {
; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
-define void @f6(i64 *%src, i64 %a) {
+define void @f6(i64 *%dst, i64 %a) {
; CHECK: f6:
; CHECK: agfi %r2, -524296
; CHECK: strvg %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64 *%dst, i64 -65537
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
@@ -85,3 +85,15 @@ define void @f7(i64 %src, i64 %index, i64 %a) {
store i64 %swapped, i64 *%ptr
ret void
}
+
+; Check that volatile stores do not use STRVG, which might access the
+; storage multiple times.
+define void @f8(i64 *%dst, i64 %a) {
+; CHECK: f8:
+; CHECK: lrvgr [[REG:%r[0-5]]], %r3
+; CHECK: stg [[REG]], 0(%r2)
+; CHECK: br %r14
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ store volatile i64 %swapped, i64 *%dst
+ ret void
+}