author     Ulrich Weigand <ulrich.weigand@de.ibm.com>   2013-05-06 16:17:29 +0000
committer  Ulrich Weigand <ulrich.weigand@de.ibm.com>   2013-05-06 16:17:29 +0000
commit     b503b49b5105b6aad7d2a015468b84b0f64dfe8e (patch)
tree       a60966043fae51838cb2faa08531a7ed078e4fb6 /test/CodeGen/SystemZ/cmpxchg-04.ll
parent     1d09d56fe1e3f3faadd4bf4ccf3e585ddb3c3b07 (diff)
[SystemZ] Add CodeGen test cases
This adds all CodeGen tests for the SystemZ target. This version of the
patch incorporates feedback from a review by Sean Silva. Thanks to all
reviewers!

Patch by Richard Sandiford.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@181204 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/SystemZ/cmpxchg-04.ll')
-rw-r--r--   test/CodeGen/SystemZ/cmpxchg-04.ll   |   98
1 file changed, 98 insertions, 0 deletions
diff --git a/test/CodeGen/SystemZ/cmpxchg-04.ll b/test/CodeGen/SystemZ/cmpxchg-04.ll
new file mode 100644
index 0000000..f8969ee
--- /dev/null
+++ b/test/CodeGen/SystemZ/cmpxchg-04.ll
@@ -0,0 +1,98 @@
+; Test 64-bit compare and swap.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check CSG without a displacement.
+define i64 @f1(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f1:
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %val = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the high end of the aligned CSG range.
+define i64 @f2(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f2:
+; CHECK: csg %r2, %r3, 524280(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f3:
+; CHECK: agfi %r4, 524288
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the high end of the negative aligned CSG range.
+define i64 @f4(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f4:
+; CHECK: csg %r2, %r3, -8(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the low end of the CSG range.
+define i64 @f5(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f5:
+; CHECK: csg %r2, %r3, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r4, -524296
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check that CSG does not allow an index.
+define i64 @f7(i64 %cmp, i64 %swap, i64 %src, i64 %index) {
+; CHECK: f7:
+; CHECK: agr %r4, %r5
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %ptr = inttoptr i64 %add1 to i64 *
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check that a constant %cmp value is loaded into a register first.
+define i64 @f8(i64 %dummy, i64 %swap, i64 *%ptr) {
+; CHECK: f8:
+; CHECK: lghi %r2, 1001
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %val = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check that a constant %swap value is loaded into a register first.
+define i64 @f9(i64 %cmp, i64 *%ptr) {
+; CHECK: f9:
+; CHECK: lghi [[SWAP:%r[0-9]+]], 1002
+; CHECK: csg %r2, [[SWAP]], 0(%r3)
+; CHECK: br %r14
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst
+ ret i64 %val
+}
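
The offsets used in f2/f3 and f5/f6 above follow from CSG's signed 20-bit
displacement field, which covers byte offsets -524288 through 524287. Below is
a minimal sketch of that boundary arithmetic; it is not part of the patch
itself, the function name @boundary is made up, and it assumes the same llc
invocation as the RUN line in the test:

; With 8-byte elements against a signed 20-bit displacement (-524288 .. 524287):
;    65535 * 8 =  524280   encodable, so f2 keeps a plain CSG
;    65536 * 8 =  524288   one past the range, so f3 adds the offset first (AGFI)
;   -65536 * 8 = -524288   lowest encodable displacement, used by f5
;   -65537 * 8 = -524296   below the range, so f6 needs separate address logic
define i64 @boundary(i64 %cmp, i64 %swap, i64 *%base) {
  %ptr = getelementptr i64 *%base, i64 65535      ; byte offset 524280, in range
  %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
  ret i64 %val
}

Compiled with the same "llc -mtriple=s390x-linux-gnu" command, this should
produce the same "csg %r2, %r3, 524280(%r4)" sequence that f2 checks for.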