author     Richard Sandiford <rsandifo@linux.vnet.ibm.com>  2013-07-03 10:10:02 +0000
committer  Richard Sandiford <rsandifo@linux.vnet.ibm.com>  2013-07-03 10:10:02 +0000
commit     fa487e83a83c260d6a50f3df00a0eb012553a912 (patch)
tree       f6ddd72df044eaa9cabbce37fd4b04f64b978139 /test/CodeGen/SystemZ/int-add-05.ll
parent     b81b477cd4392a51112c3af0659ea9fc176e74f1 (diff)
[SystemZ] Fold more spills

Add a mapping from register-based <INSN>R instructions to the
corresponding memory-based <INSN>.  Use it to cut down on the number
of spill loads.

Some instructions extend their operands from smaller fields, so this
required a new TSFlags field to say how big the unextended operand is.

This optimisation doesn't trigger for C(G)R and CL(G)R because in
practice we always combine those instructions with a branch.  Adding a
test for every other case probably seems excessive, but it did catch a
missed optimisation for DSGF (fixed in r185435).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@185529 91177308-0d34-0410-b5e6-96231b3b80d8
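To make the fold concrete, here is a hedged sketch in SystemZ assembly
(illustrative only, not taken from the commit): before the change, a
spilled operand had to be reloaded into a register before a
register-register AGR could use it; with the reg-to-mem mapping, the
memory-based AG reads the spill slot directly, which is what the f9
test below checks at 160(%r15):

    Before the fold:                After the fold:
        lg   %r1, 160(%r15)             ag   %r2, 160(%r15)
        agr  %r2, %r1

The point about extending operands matters for instructions such as
AGFR, whose memory form AGF sign-extends a 32-bit operand to 64 bits;
the new TSFlags field records that unextended width so the folded
instruction reads the right number of bytes from the spill slot.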
Diffstat (limited to 'test/CodeGen/SystemZ/int-add-05.ll')
-rw-r--r--  test/CodeGen/SystemZ/int-add-05.ll | 45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/SystemZ/int-add-05.ll b/test/CodeGen/SystemZ/int-add-05.ll
index ae32cc4..ee840ac 100644
--- a/test/CodeGen/SystemZ/int-add-05.ll
+++ b/test/CodeGen/SystemZ/int-add-05.ll
@@ -2,6 +2,8 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

+declare i64 @foo()
+
; Check AGR.
define i64 @f1(i64 %a, i64 %b) {
; CHECK: f1:
@@ -92,3 +94,46 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) {
%add = add i64 %a, %b
ret i64 %add
}
+
+; Check that additions of spilled values can use AG rather than AGR.
+define i64 @f9(i64 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: ag %r2, 160(%r15)
+; CHECK: br %r14
+ %ptr1 = getelementptr i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64 *%ptr0, i64 18
+
+ %val0 = load i64 *%ptr0
+ %val1 = load i64 *%ptr1
+ %val2 = load i64 *%ptr2
+ %val3 = load i64 *%ptr3
+ %val4 = load i64 *%ptr4
+ %val5 = load i64 *%ptr5
+ %val6 = load i64 *%ptr6
+ %val7 = load i64 *%ptr7
+ %val8 = load i64 *%ptr8
+ %val9 = load i64 *%ptr9
+
+ %ret = call i64 @foo()
+
+ %add0 = add i64 %ret, %val0
+ %add1 = add i64 %add0, %val1
+ %add2 = add i64 %add1, %val2
+ %add3 = add i64 %add2, %val3
+ %add4 = add i64 %add3, %val4
+ %add5 = add i64 %add4, %val5
+ %add6 = add i64 %add5, %val6
+ %add7 = add i64 %add6, %val7
+ %add8 = add i64 %add7, %val8
+ %add9 = add i64 %add8, %val9
+
+ ret i64 %add9
+}
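
(For context: all ten loaded values in f9 are live across the call to
@foo(). The call clobbers the call-clobbered GPRs, and there are too
few call-saved registers to hold every value, so at least one is
spilled to the stack frame; the CHECK lines verify that the addition
consumes that spill slot directly with AG at 160(%r15) rather than
reloading it with LG and using AGR.)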