path: root/test/CodeGen/X86/stack-align.ll
author    Chris Lattner <sabre@nondot.org>    2008-01-26 19:45:50 +0000
committer Chris Lattner <sabre@nondot.org>    2008-01-26 19:45:50 +0000
commit    1329cb8d8992c34365fcc2ac0447356708157dfb (patch)
tree      70766a5116a8fd3fc49296152fc8260d06625cbe  /test/CodeGen/X86/stack-align.ll
parent    216f3f6522ca2d056d644bb24e4d73a1f2335f6d (diff)
Infer alignment of loads and increase their alignment when we can tell they are
from the stack. This allows us to compile stack-align.ll to:

_test:
	movsd LCPI1_0, %xmm0
	movapd %xmm0, %xmm1
***	andpd 4(%esp), %xmm1
	andpd _G, %xmm0
	addsd %xmm1, %xmm0
	movl 20(%esp), %eax
	movsd %xmm0, (%eax)
	ret

instead of:

_test:
	movsd LCPI1_0, %xmm0
**	movsd 4(%esp), %xmm1
**	andpd %xmm0, %xmm1
	andpd _G, %xmm0
	addsd %xmm1, %xmm0
	movl 20(%esp), %eax
	movsd %xmm0, (%eax)
	ret

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@46401 91177308-0d34-0410-b5e6-96231b3b80d8
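For context, a rough C-level equivalent of the test case added below. The struct
and parameter names here are illustrative, not the original source; only the shape
(a byval pair of doubles, the global G, two fabs calls and an add) is taken from
the IR in the diff:

	#include <math.h>

	/* Sketch of the source the new test roughly corresponds to. On i386 the
	 * struct is passed by value on the stack, matching the byval argument in
	 * the IR below. */
	struct Pair { double a, b; };

	extern double G;

	void test(struct Pair z, double *P) {
	    /* Once the backend knows the byval slot is 16-byte aligned, the load
	     * of z.a folds straight into the andpd that implements fabs. */
	    *P = fabs(G) + fabs(z.a);
	}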
Diffstat (limited to 'test/CodeGen/X86/stack-align.ll')
-rw-r--r--    test/CodeGen/X86/stack-align.ll    22
1 file changed, 22 insertions, 0 deletions
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
new file mode 100644
index 0000000..1e6c2b2
--- /dev/null
+++ b/test/CodeGen/X86/stack-align.ll
@@ -0,0 +1,22 @@
+; RUN: llvm-as < %s | llc -relocation-model=static -mcpu=yonah | grep {andpd.*4(%esp), %xmm}
+
+; The double argument is at 4(esp) which is 16-byte aligned, allowing us to
+; fold the load into the andpd.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+@G = external global double
+
+define void @test({ double, double }* byval %z, double* %P) {
+entry:
+ %tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp1 = load double* %tmp, align 8 ; <double> [#uses=1]
+ %tmp2 = tail call double @fabs( double %tmp1 ) ; <double> [#uses=1]
+ %tmp3 = load double* @G, align 16 ; <double> [#uses=1]
+ %tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
+ %tmp6 = add double %tmp4, %tmp2 ; <double> [#uses=1]
+ store double %tmp6, double* %P, align 8
+ ret void
+}
+
+declare double @fabs(double)
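A note on the alignment claim in the test comment above: andpd is a 128-bit SSE
operation, so folding a memory operand into it requires a 16-byte-aligned address.
The IR only promises align 8 for the byval field, which is why the backend has to
infer the larger alignment from the fixed stack slot. Assuming the usual Darwin
i386 rule that %esp is 16-byte aligned at the call site, the arithmetic behind
"4(%esp) is 16-byte aligned" works out as:

	esp just before the call:   0 mod 16   (Darwin i386 keeps the stack 16-byte aligned at call sites)
	esp at function entry:     12 mod 16   (the call pushed a 4-byte return address)
	first byval slot, 4(esp):   0 mod 16   (so the aligned load can fold into andpd)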