author    Richard Sandiford <rsandifo@linux.vnet.ibm.com>    2013-08-19 12:42:31 +0000
committer Richard Sandiford <rsandifo@linux.vnet.ibm.com>    2013-08-19 12:42:31 +0000
commit    80f54784da0bd42fb79176bbf447a31d69287fe3 (patch)
tree      b20cb02fd15a201dc7f0d31482e031916ad2ab86 /test
parent    2063637fa7c9ebc880cf858674eb45727d4ea295 (diff)
[SystemZ] Add support for sibling calls
This first cut is pretty conservative. The final argument register (R6) is call-saved, so we would need to make sure that the R6 argument to a sibling call is the same as the R6 argument to the calling function; handling that case seems worth keeping as a separate patch.

Saying that integer truncations are free means that we no longer use the extending instructions LGF and LLGF for spills in int-conv-09.ll and int-conv-10.ll. Instead we treat the registers as 64 bits wide and truncate them to 32 bits where necessary. I think it's unlikely we'd use LGF and LLGF for spills in other situations for the same reason, so I'm removing the tests rather than replacing them. The associated code is generic and applies to many more instructions than just LGF and LLGF, so there is no corresponding code removal.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188669 91177308-0d34-0410-b5e6-96231b3b80d8
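For illustration, a minimal sketch of the behaviour under test, mirroring f1 and f2 in the new call-03.ll below (the callee names here are hypothetical): a qualifying tail call is emitted as a sibling call, i.e. a plain branch to the callee (jg), while a call whose fifth integer argument lands in the call-saved %r6 keeps the normal call-and-return sequence (brasl %r14, ...; br %r14).

declare void @simple_callee(i64 %x)
declare void @r6_callee(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e)

define void @becomes_sibling_call() {
  ; expected codegen: jg simple_callee@PLT
  tail call void @simple_callee(i64 1)
  ret void
}

define void @stays_normal_call() {
  ; %e is passed in %r6, so no sibling call:
  ; brasl %r14, r6_callee@PLT ... br %r14
  tail call void @r6_callee(i64 1, i64 2, i64 3, i64 4, i64 5)
  ret void
}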
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/SystemZ/call-03.ll     | 125
-rw-r--r--  test/CodeGen/SystemZ/int-conv-09.ll |  77
-rw-r--r--  test/CodeGen/SystemZ/int-conv-10.ll |  77
3 files changed, 125 insertions, 154 deletions
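The LGF/LLGF point can likewise be seen in isolation (hypothetical function name, not part of the commit): because an i32 value occupies the low half of a 64-bit GPR on SystemZ, a trunc from i64 to i32 needs no instruction of its own, so a spilled value can be reloaded as a full 64-bit register and used in its 32-bit form, rather than reloaded with an extending load.

define i32 @trunc_is_free(i64 %x) {
  ; no code is generated for the truncation itself; the i32 result
  ; is just the low 32 bits of the incoming 64-bit register
  %t = trunc i64 %x to i32
  ret i32 %t
}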
diff --git a/test/CodeGen/SystemZ/call-03.ll b/test/CodeGen/SystemZ/call-03.ll
new file mode 100644
index 0000000..1f314ea
--- /dev/null
+++ b/test/CodeGen/SystemZ/call-03.ll
@@ -0,0 +1,125 @@
+; Test sibling calls.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare void @ok(i8 %r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2,
+ float %f4, double %f6)
+declare void @uses_r6(i8 %r2, i16 %r3, i32 %r4, i64 %r5, i64 %r6)
+declare void @uses_indirect(fp128 %r2)
+declare void @uses_stack(float %f0, float %f2, float %f4, float %f6,
+ float %stack)
+declare i32 @returns_i32()
+declare i64 @returns_i64()
+
+; Check the maximum number of arguments that we can pass and still use
+; a sibling call.
+define void @f1() {
+; CHECK-LABEL: f1:
+; CHECK-DAG: lzer %f0
+; CHECK-DAG: lzdr %f2
+; CHECK-DAG: lhi %r2, 1
+; CHECK-DAG: lhi %r3, 2
+; CHECK-DAG: lhi %r4, 3
+; CHECK-DAG: lghi %r5, 4
+; CHECK-DAG: {{ler %f4, %f0|lzer %f4}}
+; CHECK-DAG: {{ldr %f6, %f2|lzdr %f6}}
+; CHECK: jg ok@PLT
+ tail call void @ok(i8 1, i16 2, i32 3, i64 4, float 0.0, double 0.0,
+ float 0.0, double 0.0)
+ ret void
+}
+
+; Check a call that uses %r6 to pass an argument. At the moment we don't
+; use sibling calls in that case.
+define void @f2() {
+; CHECK-LABEL: f2:
+; CHECK: brasl %r14, uses_r6@PLT
+; CHECK: br %r14
+ tail call void @uses_r6(i8 1, i16 2, i32 3, i64 4, i64 5)
+ ret void
+}
+
+; Check a call that passes indirect arguments. We can't use sibling
+; calls in that case.
+define void @f3() {
+; CHECK-LABEL: f3:
+; CHECK: brasl %r14, uses_indirect@PLT
+; CHECK: br %r14
+ tail call void @uses_indirect(fp128 0xL00000000000000000000000000000000)
+ ret void
+}
+
+; Check a call that uses direct stack arguments, which again prevents
+; sibling calls.
+define void @f4() {
+; CHECK-LABEL: f4:
+; CHECK: brasl %r14, uses_stack@PLT
+; CHECK: br %r14
+ tail call void @uses_stack(float 0.0, float 0.0, float 0.0, float 0.0,
+ float 0.0)
+ ret void
+}
+
+; Check an indirect call. In this case the only acceptable choice for
+; the target register is %r1.
+define void @f5(void(i32, i32, i32, i32) *%foo) {
+; CHECK-LABEL: f5:
+; CHECK: lgr %r1, %r2
+; CHECK-DAG: lhi %r2, 1
+; CHECK-DAG: lhi %r3, 2
+; CHECK-DAG: lhi %r4, 3
+; CHECK-DAG: lhi %r5, 4
+; CHECK: br %r1
+ tail call void %foo(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; Check an indirect call that will be forced into a call-saved GPR
+; (which should be %r13, the highest GPR not used for anything else).
+define void @f6(void(i32) *%foo) {
+; CHECK-LABEL: f6:
+; CHECK: stmg %r13, %r15, 104(%r15)
+; CHECK: lgr %r13, %r2
+; CHECK: brasl %r14, returns_i32
+; CHECK: lgr %r1, %r13
+; CHECK: lmg %r13, %r15, 264(%r15)
+; CHECK: br %r1
+ %arg = call i32 @returns_i32()
+ tail call void %foo(i32 %arg)
+ ret void
+}
+
+; Test a function that returns a value.
+define i64 @f7() {
+; CHECK-LABEL: f7:
+; CHECK: jg returns_i64@PLT
+ %res = tail call i64 @returns_i64()
+ ret i64 %res
+}
+
+; Test a function that returns a value truncated from i64 to i32.
+define i32 @f8() {
+; CHECK-LABEL: f8:
+; CHECK: jg returns_i64@PLT
+ %res = tail call i64 @returns_i64()
+ %trunc = trunc i64 %res to i32
+ ret i32 %trunc
+}
+
+; Test a function that returns a value truncated from i64 to i7.
+define i7 @f9() {
+; CHECK-LABEL: f9:
+; CHECK: jg returns_i64@PLT
+ %res = tail call i64 @returns_i64()
+ %trunc = trunc i64 %res to i7
+ ret i7 %trunc
+}
+
+; Test a function that returns a value truncated from i32 to i8.
+define i8 @f10() {
+; CHECK-LABEL: f10:
+; CHECK: jg returns_i32@PLT
+ %res = tail call i32 @returns_i32()
+ %trunc = trunc i32 %res to i8
+ ret i8 %trunc
+}
diff --git a/test/CodeGen/SystemZ/int-conv-09.ll b/test/CodeGen/SystemZ/int-conv-09.ll
index db4c333..b9c5089 100644
--- a/test/CodeGen/SystemZ/int-conv-09.ll
+++ b/test/CodeGen/SystemZ/int-conv-09.ll
@@ -102,80 +102,3 @@ define i64 @f9(i64 %src, i64 %index) {
%ext = sext i32 %word to i64
ret i64 %ext
}
-
-; Test a case where we spill the source of at least one LGFR. We want
-; to use LGF if possible.
-define void @f10(i64 *%ptr1, i32 *%ptr2) {
-; CHECK-LABEL: f10:
-; CHECK: lgf {{%r[0-9]+}}, 16{{[04]}}(%r15)
-; CHECK: br %r14
- %val0 = load volatile i32 *%ptr2
- %val1 = load volatile i32 *%ptr2
- %val2 = load volatile i32 *%ptr2
- %val3 = load volatile i32 *%ptr2
- %val4 = load volatile i32 *%ptr2
- %val5 = load volatile i32 *%ptr2
- %val6 = load volatile i32 *%ptr2
- %val7 = load volatile i32 *%ptr2
- %val8 = load volatile i32 *%ptr2
- %val9 = load volatile i32 *%ptr2
- %val10 = load volatile i32 *%ptr2
- %val11 = load volatile i32 *%ptr2
- %val12 = load volatile i32 *%ptr2
- %val13 = load volatile i32 *%ptr2
- %val14 = load volatile i32 *%ptr2
- %val15 = load volatile i32 *%ptr2
-
- %ext0 = sext i32 %val0 to i64
- %ext1 = sext i32 %val1 to i64
- %ext2 = sext i32 %val2 to i64
- %ext3 = sext i32 %val3 to i64
- %ext4 = sext i32 %val4 to i64
- %ext5 = sext i32 %val5 to i64
- %ext6 = sext i32 %val6 to i64
- %ext7 = sext i32 %val7 to i64
- %ext8 = sext i32 %val8 to i64
- %ext9 = sext i32 %val9 to i64
- %ext10 = sext i32 %val10 to i64
- %ext11 = sext i32 %val11 to i64
- %ext12 = sext i32 %val12 to i64
- %ext13 = sext i32 %val13 to i64
- %ext14 = sext i32 %val14 to i64
- %ext15 = sext i32 %val15 to i64
-
- store volatile i32 %val0, i32 *%ptr2
- store volatile i32 %val1, i32 *%ptr2
- store volatile i32 %val2, i32 *%ptr2
- store volatile i32 %val3, i32 *%ptr2
- store volatile i32 %val4, i32 *%ptr2
- store volatile i32 %val5, i32 *%ptr2
- store volatile i32 %val6, i32 *%ptr2
- store volatile i32 %val7, i32 *%ptr2
- store volatile i32 %val8, i32 *%ptr2
- store volatile i32 %val9, i32 *%ptr2
- store volatile i32 %val10, i32 *%ptr2
- store volatile i32 %val11, i32 *%ptr2
- store volatile i32 %val12, i32 *%ptr2
- store volatile i32 %val13, i32 *%ptr2
- store volatile i32 %val14, i32 *%ptr2
- store volatile i32 %val15, i32 *%ptr2
-
- store volatile i64 %ext0, i64 *%ptr1
- store volatile i64 %ext1, i64 *%ptr1
- store volatile i64 %ext2, i64 *%ptr1
- store volatile i64 %ext3, i64 *%ptr1
- store volatile i64 %ext4, i64 *%ptr1
- store volatile i64 %ext5, i64 *%ptr1
- store volatile i64 %ext6, i64 *%ptr1
- store volatile i64 %ext7, i64 *%ptr1
- store volatile i64 %ext8, i64 *%ptr1
- store volatile i64 %ext9, i64 *%ptr1
- store volatile i64 %ext10, i64 *%ptr1
- store volatile i64 %ext11, i64 *%ptr1
- store volatile i64 %ext12, i64 *%ptr1
- store volatile i64 %ext13, i64 *%ptr1
- store volatile i64 %ext14, i64 *%ptr1
- store volatile i64 %ext15, i64 *%ptr1
-
- ret void
-}
diff --git a/test/CodeGen/SystemZ/int-conv-10.ll b/test/CodeGen/SystemZ/int-conv-10.ll
index f2f71d9..781c74c 100644
--- a/test/CodeGen/SystemZ/int-conv-10.ll
+++ b/test/CodeGen/SystemZ/int-conv-10.ll
@@ -111,80 +111,3 @@ define i64 @f10(i64 %src, i64 %index) {
%ext = zext i32 %word to i64
ret i64 %ext
}
-
-; Test a case where we spill the source of at least one LLGFR. We want
-; to use LLGF if possible.
-define void @f11(i64 *%ptr1, i32 *%ptr2) {
-; CHECK-LABEL: f11:
-; CHECK: llgf {{%r[0-9]+}}, 16{{[04]}}(%r15)
-; CHECK: br %r14
- %val0 = load volatile i32 *%ptr2
- %val1 = load volatile i32 *%ptr2
- %val2 = load volatile i32 *%ptr2
- %val3 = load volatile i32 *%ptr2
- %val4 = load volatile i32 *%ptr2
- %val5 = load volatile i32 *%ptr2
- %val6 = load volatile i32 *%ptr2
- %val7 = load volatile i32 *%ptr2
- %val8 = load volatile i32 *%ptr2
- %val9 = load volatile i32 *%ptr2
- %val10 = load volatile i32 *%ptr2
- %val11 = load volatile i32 *%ptr2
- %val12 = load volatile i32 *%ptr2
- %val13 = load volatile i32 *%ptr2
- %val14 = load volatile i32 *%ptr2
- %val15 = load volatile i32 *%ptr2
-
- %ext0 = zext i32 %val0 to i64
- %ext1 = zext i32 %val1 to i64
- %ext2 = zext i32 %val2 to i64
- %ext3 = zext i32 %val3 to i64
- %ext4 = zext i32 %val4 to i64
- %ext5 = zext i32 %val5 to i64
- %ext6 = zext i32 %val6 to i64
- %ext7 = zext i32 %val7 to i64
- %ext8 = zext i32 %val8 to i64
- %ext9 = zext i32 %val9 to i64
- %ext10 = zext i32 %val10 to i64
- %ext11 = zext i32 %val11 to i64
- %ext12 = zext i32 %val12 to i64
- %ext13 = zext i32 %val13 to i64
- %ext14 = zext i32 %val14 to i64
- %ext15 = zext i32 %val15 to i64
-
- store volatile i32 %val0, i32 *%ptr2
- store volatile i32 %val1, i32 *%ptr2
- store volatile i32 %val2, i32 *%ptr2
- store volatile i32 %val3, i32 *%ptr2
- store volatile i32 %val4, i32 *%ptr2
- store volatile i32 %val5, i32 *%ptr2
- store volatile i32 %val6, i32 *%ptr2
- store volatile i32 %val7, i32 *%ptr2
- store volatile i32 %val8, i32 *%ptr2
- store volatile i32 %val9, i32 *%ptr2
- store volatile i32 %val10, i32 *%ptr2
- store volatile i32 %val11, i32 *%ptr2
- store volatile i32 %val12, i32 *%ptr2
- store volatile i32 %val13, i32 *%ptr2
- store volatile i32 %val14, i32 *%ptr2
- store volatile i32 %val15, i32 *%ptr2
-
- store volatile i64 %ext0, i64 *%ptr1
- store volatile i64 %ext1, i64 *%ptr1
- store volatile i64 %ext2, i64 *%ptr1
- store volatile i64 %ext3, i64 *%ptr1
- store volatile i64 %ext4, i64 *%ptr1
- store volatile i64 %ext5, i64 *%ptr1
- store volatile i64 %ext6, i64 *%ptr1
- store volatile i64 %ext7, i64 *%ptr1
- store volatile i64 %ext8, i64 *%ptr1
- store volatile i64 %ext9, i64 *%ptr1
- store volatile i64 %ext10, i64 *%ptr1
- store volatile i64 %ext11, i64 *%ptr1
- store volatile i64 %ext12, i64 *%ptr1
- store volatile i64 %ext13, i64 *%ptr1
- store volatile i64 %ext14, i64 *%ptr1
- store volatile i64 %ext15, i64 *%ptr1
-
- ret void
-}