diff options
author | Bill Schmidt <wschmidt@linux.vnet.ibm.com> | 2013-08-30 02:29:45 +0000 |
---|---|---|
committer | Bill Schmidt <wschmidt@linux.vnet.ibm.com> | 2013-08-30 02:29:45 +0000 |
commit | 7248968fa529726b44d41bd25403d50c74db4bc4 (patch) | |
tree | c5982d93a8af273fdbe4e2712de1e43d2e94b4af /test/CodeGen/PowerPC/fast-isel-fold.ll | |
parent | 6dc6a89d73c24f20caabda4cdcd9279e88658d0b (diff) | |
download | external_llvm-7248968fa529726b44d41bd25403d50c74db4bc4.zip external_llvm-7248968fa529726b44d41bd25403d50c74db4bc4.tar.gz external_llvm-7248968fa529726b44d41bd25403d50c74db4bc4.tar.bz2 |
[PowerPC] Add loads, stores, and related things to fast-isel.
This is the next big chunk of fast-isel code. The primary purpose is
to implement selection of loads and stores, but there is a lot of
drag-along to support this. The common code to analyze addresses for
both loads and stores is substantial. It's also necessary to add the
materialization code for global values.
Related to load-store processing is the code to fold loads into
integer extends, since otherwise we generate lots of redundant
instructions. We also need to add some overrides to some FastEmit
routines to ensure we don't assign GPR 0 to a virtual register when
this would change the meaning of an instruction.
I added handling selection of a few binary arithmetic instructions, to
enable committing some test cases I wrote a while back.
Finally, a couple of miscellaneous changes:
* I cleaned up some poor style from a previous patch in
PPCISelLowering.cpp, pointed out by David Blaikie.
* I enlarged the Addr.Offset field to avoid sign problems with 32-bit
offsets.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@189636 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/PowerPC/fast-isel-fold.ll')
-rw-r--r-- | test/CodeGen/PowerPC/fast-isel-fold.ll | 95 |
1 files changed, 95 insertions, 0 deletions
diff --git a/test/CodeGen/PowerPC/fast-isel-fold.ll b/test/CodeGen/PowerPC/fast-isel-fold.ll new file mode 100644 index 0000000..21e6912 --- /dev/null +++ b/test/CodeGen/PowerPC/fast-isel-fold.ll @@ -0,0 +1,95 @@ +; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64 + +@a = global i8 1, align 1 +@b = global i16 2, align 2 +@c = global i32 4, align 4 + +define i32 @t3() nounwind uwtable ssp { +; ELF64: t3 + %1 = load i8* @a, align 1 + %2 = zext i8 %1 to i32 +; ELF64: lbz +; ELF64-NOT: rlwinm + ret i32 %2 +} + +define i32 @t4() nounwind uwtable ssp { +; ELF64: t4 + %1 = load i16* @b, align 2 + %2 = zext i16 %1 to i32 +; ELF64: lhz +; ELF64-NOT: rlwinm + ret i32 %2 +} + +define i32 @t5() nounwind uwtable ssp { +; ELF64: t5 + %1 = load i16* @b, align 2 + %2 = sext i16 %1 to i32 +; ELF64: lha +; ELF64-NOT: rlwinm + ret i32 %2 +} + +define i32 @t6() nounwind uwtable ssp { +; ELF64: t6 + %1 = load i8* @a, align 2 + %2 = sext i8 %1 to i32 +; ELF64: lbz +; ELF64-NOT: rlwinm + ret i32 %2 +} + +define i64 @t7() nounwind uwtable ssp { +; ELF64: t7 + %1 = load i8* @a, align 1 + %2 = zext i8 %1 to i64 +; ELF64: lbz +; ELF64-NOT: rldicl + ret i64 %2 +} + +define i64 @t8() nounwind uwtable ssp { +; ELF64: t8 + %1 = load i16* @b, align 2 + %2 = zext i16 %1 to i64 +; ELF64: lhz +; ELF64-NOT: rldicl + ret i64 %2 +} + +define i64 @t9() nounwind uwtable ssp { +; ELF64: t9 + %1 = load i16* @b, align 2 + %2 = sext i16 %1 to i64 +; ELF64: lha +; ELF64-NOT: extsh + ret i64 %2 +} + +define i64 @t10() nounwind uwtable ssp { +; ELF64: t10 + %1 = load i8* @a, align 2 + %2 = sext i8 %1 to i64 +; ELF64: lbz +; ELF64: extsb + ret i64 %2 +} + +define i64 @t11() nounwind uwtable ssp { +; ELF64: t11 + %1 = load i32* @c, align 4 + %2 = zext i32 %1 to i64 +; ELF64: lwz +; ELF64-NOT: rldicl + ret i64 %2 +} + +define i64 @t12() nounwind uwtable ssp { +; ELF64: t12 + %1 = load i32* @c, align 4 + %2 = sext i32 %1 to 
i64 +; ELF64: lwa +; ELF64-NOT: extsw + ret i64 %2 +} |