author     Chris Lattner <sabre@nondot.org>    2010-04-15 05:40:59 +0000
committer  Chris Lattner <sabre@nondot.org>    2010-04-15 05:40:59 +0000
commit     01f604a270eb7519eae006f3c6491bef6f9f8a8f (patch)
tree       2ef2b13ce6f4a84ece6070c2f97bc29d0ed66000 /test/CodeGen
parent     828c441a908fce30c16921936fcab5a9c063ea69 (diff)
teach codegen to turn trunc(zextload) into load when possible.
This doesn't occur much at all; it only seems to be formed when the
trunc optimization kicks in due to phase ordering. In that case it
saves a few bytes on x86-32.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@101350 91177308-0d34-0410-b5e6-96231b3b80d8
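
For context, the pattern is easiest to see in the x86-32 half of the test below: a zeroext i8 argument is passed on the stack, so it reaches the DAG as a zero-extending byte load (a zextload), and the store-narrowing combine then truncates that value back to i8 to store a single byte. A minimal sketch distilled from test1 in the diff (illustrative only, not itself part of the commit; the function name is made up):

    ; compile with: llc -march=x86 < %s
    define void @low_byte(i32* nocapture %a0, i8 zeroext %a1) nounwind {
    entry:
      %A = load i32* %a0, align 4        ; load the whole word
      %B = and i32 %A, -256              ; clear the low byte (0xFFFFFF00)
      %C = zext i8 %a1 to i32
      %D = or i32 %C, %B                 ; insert the new low byte
      store i32 %D, i32* %a0, align 4    ; narrowed to a one-byte store
      ret void
    }

With the fold, the truncated zextload of %a1 can be selected as a plain movb from the stack slot rather than a zero-extending load, which is where the few bytes on x86-32 come from.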
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/X86/store-narrow.ll | 52
1 file changed, 39 insertions(+), 13 deletions(-)
diff --git a/test/CodeGen/X86/store-narrow.ll b/test/CodeGen/X86/store-narrow.ll
index 23aa616..f47bf31 100644
--- a/test/CodeGen/X86/store-narrow.ll
+++ b/test/CodeGen/X86/store-narrow.ll
@@ -1,5 +1,6 @@
 ; rdar://7860110
-; RUN: llc < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s -check-prefix=X64
+; RUN: llc -march=x86 < %s | FileCheck %s -check-prefix=X32
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.2"
 
@@ -12,8 +13,12 @@ entry:
   %D = or i32 %C, %B
   store i32 %D, i32* %a0, align 4
   ret void
-; CHECK: test1:
-; CHECK: movb %sil, (%rdi)
+; X64: test1:
+; X64: movb %sil, (%rdi)
+
+; X32: test1:
+; X32: movb 8(%esp), %al
+; X32: movb %al, (%{{.*}})
 }
 
 define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
@@ -25,8 +30,12 @@ entry:
   %D = or i32 %B, %CS
   store i32 %D, i32* %a0, align 4
   ret void
-; CHECK: test2:
-; CHECK: movb %sil, 1(%rdi)
+; X64: test2:
+; X64: movb %sil, 1(%rdi)
+
+; X32: test2:
+; X32: movb 8(%esp), %al
+; X32: movb %al, 1(%{{.*}})
 }
 
 define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
@@ -37,8 +46,12 @@ entry:
   %D = or i32 %B, %C
   store i32 %D, i32* %a0, align 4
   ret void
-; CHECK: test3:
-; CHECK: movw %si, (%rdi)
+; X64: test3:
+; X64: movw %si, (%rdi)
+
+; X32: test3:
+; X32: movw 8(%esp), %ax
+; X32: movw %ax, (%{{.*}})
 }
 
 define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
@@ -50,8 +63,12 @@ entry:
   %D = or i32 %B, %CS
   store i32 %D, i32* %a0, align 4
   ret void
-; CHECK: test4:
-; CHECK: movw %si, 2(%rdi)
+; X64: test4:
+; X64: movw %si, 2(%rdi)
+
+; X32: test4:
+; X32: movw 8(%esp), %ax
+; X32: movw %ax, 2(%{{.*}})
 }
 
 define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
@@ -63,8 +80,12 @@ entry:
   %D = or i64 %B, %CS
   store i64 %D, i64* %a0, align 4
   ret void
-; CHECK: test5:
-; CHECK: movw %si, 2(%rdi)
+; X64: test5:
+; X64: movw %si, 2(%rdi)
+
+; X32: test5:
+; X32: movw 8(%esp), %ax
+; X32: movw %ax, 2(%{{.*}})
 }
 
 define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
@@ -76,6 +97,11 @@ entry:
   %D = or i64 %B, %CS
   store i64 %D, i64* %a0, align 4
   ret void
-; CHECK: test6:
-; CHECK: movb %sil, 5(%rdi)
+; X64: test6:
+; X64: movb %sil, 5(%rdi)
+
+
+; X32: test6:
+; X32: movb 8(%esp), %al
+; X32: movb %al, 5(%{{.*}})
 }
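
For reference, the two RUN lines above expand (with %s substituted by hand; paths are relative to an LLVM checkout with llc and FileCheck built) to commands like:

    llc < test/CodeGen/X86/store-narrow.ll | FileCheck test/CodeGen/X86/store-narrow.ll -check-prefix=X64
    llc -march=x86 < test/CodeGen/X86/store-narrow.ll | FileCheck test/CodeGen/X86/store-narrow.ll -check-prefix=X32

The first invocation uses the module's x86_64-apple-darwin10.2 triple and checks the X64 patterns; the second forces 32-bit code generation, where the X32 patterns expect the argument to be reloaded from the stack with a plain byte or word load.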