author    | Richard Sandiford <rsandifo@linux.vnet.ibm.com> | 2013-07-03 10:10:02 +0000
committer | Richard Sandiford <rsandifo@linux.vnet.ibm.com> | 2013-07-03 10:10:02 +0000
commit    | fa487e83a83c260d6a50f3df00a0eb012553a912 (patch)
tree      | f6ddd72df044eaa9cabbce37fd4b04f64b978139 /test/CodeGen/SystemZ/fp-cmp-01.ll
parent    | b81b477cd4392a51112c3af0659ea9fc176e74f1 (diff)
[SystemZ] Fold more spills
Add a mapping from register-based <INSN>R instructions to the corresponding
memory-based <INSN>. Use it to cut down on the number of spill loads.
Some instructions extend their operands from smaller fields, so this
required a new TSFlags field to say how big the unextended operand is.
This optimisation doesn't trigger for C(G)R and CL(G)R because in practice
we always combine those instructions with a branch. Adding a test for every
other case might seem excessive, but doing so did catch a missed optimisation
for DSGF (fixed in r185435).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@185529 91177308-0d34-0410-b5e6-96231b3b80d8
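As a rough illustration of the folding this enables (the instruction sequence and the 164(%r15) stack offset below are made up for the example, not taken from the commit): previously a compare against a spilled float needed an explicit reload before the register form CEBR, whereas with the <INSN>R to <INSN> mapping the reload can be folded into the memory form CEB.

    # Before: reload the spilled value, then use the register-register compare.
    le    %f0, 164(%r15)    # LE: load short FP from the spill slot (offset is illustrative)
    cebr  %f1, %f0          # CEBR: compare short BFP, both operands in registers

    # After: the spill load is folded into the memory form of the compare.
    ceb   %f1, 164(%r15)    # CEB: compare short BFP, second operand read from memory

The new f7 test below exercises this case: it forces floats to be spilled across a call to @foo() and then checks that the comparisons use CEB against the stack frame rather than a reload followed by CEBR.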
Diffstat (limited to 'test/CodeGen/SystemZ/fp-cmp-01.ll')
-rw-r--r-- | test/CodeGen/SystemZ/fp-cmp-01.ll | 60
1 file changed, 60 insertions, 0 deletions
diff --git a/test/CodeGen/SystemZ/fp-cmp-01.ll b/test/CodeGen/SystemZ/fp-cmp-01.ll
index cb2a6be..5aef57f 100644
--- a/test/CodeGen/SystemZ/fp-cmp-01.ll
+++ b/test/CodeGen/SystemZ/fp-cmp-01.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare float @foo()
+
 ; Check comparison with registers.
 define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) {
 ; CHECK: f1:
@@ -87,3 +89,61 @@ define i64 @f6(i64 %a, i64 %b, float %f1, float *%base, i64 %index) {
   %res = select i1 %cond, i64 %a, i64 %b
   ret i64 %res
 }
+
+; Check that comparisons of spilled values can use CEB rather than CEBR.
+define float @f7(float *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: ceb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr float *%ptr0, i64 2
+  %ptr2 = getelementptr float *%ptr0, i64 4
+  %ptr3 = getelementptr float *%ptr0, i64 6
+  %ptr4 = getelementptr float *%ptr0, i64 8
+  %ptr5 = getelementptr float *%ptr0, i64 10
+  %ptr6 = getelementptr float *%ptr0, i64 12
+  %ptr7 = getelementptr float *%ptr0, i64 14
+  %ptr8 = getelementptr float *%ptr0, i64 16
+  %ptr9 = getelementptr float *%ptr0, i64 18
+  %ptr10 = getelementptr float *%ptr0, i64 20
+
+  %val0 = load float *%ptr0
+  %val1 = load float *%ptr1
+  %val2 = load float *%ptr2
+  %val3 = load float *%ptr3
+  %val4 = load float *%ptr4
+  %val5 = load float *%ptr5
+  %val6 = load float *%ptr6
+  %val7 = load float *%ptr7
+  %val8 = load float *%ptr8
+  %val9 = load float *%ptr9
+  %val10 = load float *%ptr10
+
+  %ret = call float @foo()
+
+  %cmp0 = fcmp olt float %ret, %val0
+  %cmp1 = fcmp olt float %ret, %val1
+  %cmp2 = fcmp olt float %ret, %val2
+  %cmp3 = fcmp olt float %ret, %val3
+  %cmp4 = fcmp olt float %ret, %val4
+  %cmp5 = fcmp olt float %ret, %val5
+  %cmp6 = fcmp olt float %ret, %val6
+  %cmp7 = fcmp olt float %ret, %val7
+  %cmp8 = fcmp olt float %ret, %val8
+  %cmp9 = fcmp olt float %ret, %val9
+  %cmp10 = fcmp olt float %ret, %val10
+
+  %sel0 = select i1 %cmp0, float %ret, float 0.0
+  %sel1 = select i1 %cmp1, float %sel0, float 1.0
+  %sel2 = select i1 %cmp2, float %sel1, float 2.0
+  %sel3 = select i1 %cmp3, float %sel2, float 3.0
+  %sel4 = select i1 %cmp4, float %sel3, float 4.0
+  %sel5 = select i1 %cmp5, float %sel4, float 5.0
+  %sel6 = select i1 %cmp6, float %sel5, float 6.0
+  %sel7 = select i1 %cmp7, float %sel6, float 7.0
+  %sel8 = select i1 %cmp8, float %sel7, float 8.0
+  %sel9 = select i1 %cmp9, float %sel8, float 9.0
+  %sel10 = select i1 %cmp10, float %sel9, float 10.0
+
+  ret float %sel10
+}