author     Dan Gohman <gohman@apple.com>    2010-06-21 22:17:20 +0000
committer  Dan Gohman <gohman@apple.com>    2010-06-21 22:17:20 +0000
commit     584fedf188e863604f7b946a931c0f40cf9c944e (patch)
tree       124aa24862dceb32621b9fbdefa15e4f42953b0a /test/CodeGen/X86/stack-align.ll
parent     b7cc3f6ae6311395080693a14e67105ddc53665c (diff)
Teach two-address lowering how to unfold a load to open up commuting
opportunities. For example, this lets it emit this:

    movq (%rax), %rcx
    addq %rdx, %rcx

instead of this:

    movq %rdx, %rcx
    addq (%rax), %rcx

in the case where %rdx has subsequent uses. It's the same number of
instructions, and usually the same encoding size on x86, but it appears
faster, and in general, it may allow better scheduling for the load.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@106493 91177308-0d34-0410-b5e6-96231b3b80d8
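A minimal sketch of the situation the message describes, written in the same
era's IR syntax as the test below (the function and names are hypothetical,
not taken from this commit): the add could fold the load into a memory
operand, but its other operand is still live afterwards, so folding would
force a copy of that register first.

    define i64 @example(i64* %p, i64 %x) {
    entry:
      %v = load i64* %p        ; load that could be folded into the add
      %s = add i64 %x, %v      ; two-address on x86: one source doubles as the destination
      %t = mul i64 %s, %x      ; subsequent use of %x keeps it live past the add
      ret i64 %t
    }

With the load left as a separate instruction, the two-address pass can commute
the add so the loaded value, rather than the still-live %x, is the operand
that gets clobbered, which corresponds to the preferred movq/addq sequence
shown above.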
Diffstat (limited to 'test/CodeGen/X86/stack-align.ll')
-rw-r--r--  test/CodeGen/X86/stack-align.ll  9
1 file changed, 5 insertions, 4 deletions
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index 271ad1a..8ca0b12 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -9,14 +9,15 @@ target triple = "i686-apple-darwin8"
define void @test({ double, double }* byval %z, double* %P) {
entry:
+ %tmp3 = load double* @G, align 16 ; <double> [#uses=1]
+ %tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
+ volatile store double %tmp4, double* %P
%tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp1 = load double* %tmp, align 8 ; <double> [#uses=1]
+ %tmp1 = volatile load double* %tmp, align 8 ; <double> [#uses=1]
%tmp2 = tail call double @fabs( double %tmp1 ) ; <double> [#uses=1]
; CHECK: andpd{{.*}}4(%esp), %xmm
- %tmp3 = load double* @G, align 16 ; <double> [#uses=1]
- %tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
%tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
- store double %tmp6, double* %P, align 8
+ volatile store double %tmp6, double* %P, align 8
ret void
}