diff options
author | Tanya Lattner <tonic@nondot.org> | 2008-02-19 08:07:33 +0000 |
---|---|---|
committer | Tanya Lattner <tonic@nondot.org> | 2008-02-19 08:07:33 +0000 |
commit | 5ebaf3ba1b5f8f05c01b1d7898a35273e83c1750 (patch) | |
tree | fb23c878019caf961242d6922d065dfae51fb7ca /test/CodeGen/PowerPC/rlwimi2.ll | |
parent | 660a728f833d167b82e04950e84270d8909c81f1 (diff) | |
download | external_llvm-5ebaf3ba1b5f8f05c01b1d7898a35273e83c1750.zip external_llvm-5ebaf3ba1b5f8f05c01b1d7898a35273e83c1750.tar.gz external_llvm-5ebaf3ba1b5f8f05c01b1d7898a35273e83c1750.tar.bz2 |
Remove llvm-upgrade and update tests.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47325 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/PowerPC/rlwimi2.ll')
-rw-r--r-- | test/CodeGen/PowerPC/rlwimi2.ll | 46 |
1 file changed, 22 insertions, 24 deletions
diff --git a/test/CodeGen/PowerPC/rlwimi2.ll b/test/CodeGen/PowerPC/rlwimi2.ll
index 501daf0..33eaacf 100644
--- a/test/CodeGen/PowerPC/rlwimi2.ll
+++ b/test/CodeGen/PowerPC/rlwimi2.ll
@@ -1,31 +1,29 @@
 ; All of these ands and shifts should be folded into rlwimi's
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -o %t -f
+; RUN: llvm-as < %s | llc -march=ppc32 -o %t -f
 ; RUN: grep rlwimi %t | count 3
 ; RUN: grep srwi %t | count 1
 ; RUN: not grep slwi %t
-implementation   ; Functions:
-
-ushort %test1(uint %srcA, uint %srcB, uint %alpha) {
+define i16 @test1(i32 %srcA, i32 %srcB, i32 %alpha) {
 entry:
-	%tmp.1 = shl uint %srcA, ubyte 15 ; <uint> [#uses=1]
-	%tmp.4 = and uint %tmp.1, 32505856 ; <uint> [#uses=1]
-	%tmp.6 = and uint %srcA, 31775 ; <uint> [#uses=1]
-	%tmp.7 = or uint %tmp.4, %tmp.6 ; <uint> [#uses=1]
-	%tmp.9 = shl uint %srcB, ubyte 15 ; <uint> [#uses=1]
-	%tmp.12 = and uint %tmp.9, 32505856 ; <uint> [#uses=1]
-	%tmp.14 = and uint %srcB, 31775 ; <uint> [#uses=1]
-	%tmp.15 = or uint %tmp.12, %tmp.14 ; <uint> [#uses=1]
-	%tmp.18 = mul uint %tmp.7, %alpha ; <uint> [#uses=1]
-	%tmp.20 = sub uint 32, %alpha ; <uint> [#uses=1]
-	%tmp.22 = mul uint %tmp.15, %tmp.20 ; <uint> [#uses=1]
-	%tmp.23 = add uint %tmp.22, %tmp.18 ; <uint> [#uses=2]
-	%tmp.27 = shr uint %tmp.23, ubyte 5 ; <uint> [#uses=1]
-	%tmp.28 = cast uint %tmp.27 to ushort ; <ushort> [#uses=1]
-	%tmp.29 = and ushort %tmp.28, 31775 ; <ushort> [#uses=1]
-	%tmp.33 = shr uint %tmp.23, ubyte 20 ; <uint> [#uses=1]
-	%tmp.34 = cast uint %tmp.33 to ushort ; <ushort> [#uses=1]
-	%tmp.35 = and ushort %tmp.34, 992 ; <ushort> [#uses=1]
-	%tmp.36 = or ushort %tmp.29, %tmp.35 ; <ushort> [#uses=1]
-	ret ushort %tmp.36
+	%tmp.1 = shl i32 %srcA, 15 ; <i32> [#uses=1]
+	%tmp.4 = and i32 %tmp.1, 32505856 ; <i32> [#uses=1]
+	%tmp.6 = and i32 %srcA, 31775 ; <i32> [#uses=1]
+	%tmp.7 = or i32 %tmp.4, %tmp.6 ; <i32> [#uses=1]
+	%tmp.9 = shl i32 %srcB, 15 ; <i32> [#uses=1]
+	%tmp.12 = and i32 %tmp.9, 32505856 ; <i32> [#uses=1]
+	%tmp.14 = and i32 %srcB, 31775 ; <i32> [#uses=1]
+	%tmp.15 = or i32 %tmp.12, %tmp.14 ; <i32> [#uses=1]
+	%tmp.18 = mul i32 %tmp.7, %alpha ; <i32> [#uses=1]
+	%tmp.20 = sub i32 32, %alpha ; <i32> [#uses=1]
+	%tmp.22 = mul i32 %tmp.15, %tmp.20 ; <i32> [#uses=1]
+	%tmp.23 = add i32 %tmp.22, %tmp.18 ; <i32> [#uses=2]
+	%tmp.27 = lshr i32 %tmp.23, 5 ; <i32> [#uses=1]
+	%tmp.28 = trunc i32 %tmp.27 to i16 ; <i16> [#uses=1]
+	%tmp.29 = and i16 %tmp.28, 31775 ; <i16> [#uses=1]
+	%tmp.33 = lshr i32 %tmp.23, 20 ; <i32> [#uses=1]
+	%tmp.34 = trunc i32 %tmp.33 to i16 ; <i16> [#uses=1]
+	%tmp.35 = and i16 %tmp.34, 992 ; <i16> [#uses=1]
+	%tmp.36 = or i16 %tmp.29, %tmp.35 ; <i16> [#uses=1]
+	ret i16 %tmp.36
 }