path: root/test/CodeGen/PowerPC/rlwimi2.ll
author     Dan Gohman <djg@cray.com>  2007-07-18 16:29:46 +0000
committer  Dan Gohman <djg@cray.com>  2007-07-18 16:29:46 +0000
commit     f17a25c88b892d30c2b41ba7ecdfbdfb2b4be9cc (patch)
tree       ebb79ea1ee5e3bc1fdf38541a811a8b804f0679a /test/CodeGen/PowerPC/rlwimi2.ll
It's not necessary to do rounding for alloca operations when the requested
alignment is equal to the stack alignment.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40004 91177308-0d34-0410-b5e6-96231b3b80d8
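
For context, a minimal hedged sketch of the case the commit message describes (the function name, sizes, and the 16-byte figure are illustrative and not taken from the patch): an alloca whose requested alignment already matches the target's stack alignment needs no extra rounding of its size or frame offset.

define i8* @demo() {
entry:
  ; 64 bytes requested with align 16; if the target stack alignment is
  ; also 16, this slot needs no additional rounding when laid out
  %buf = alloca i8, i32 64, align 16
  ret i8* %buf
}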
Diffstat (limited to 'test/CodeGen/PowerPC/rlwimi2.ll')
-rw-r--r--   test/CodeGen/PowerPC/rlwimi2.ll   31
1 file changed, 31 insertions, 0 deletions
diff --git a/test/CodeGen/PowerPC/rlwimi2.ll b/test/CodeGen/PowerPC/rlwimi2.ll
new file mode 100644
index 0000000..c264d2e
--- /dev/null
+++ b/test/CodeGen/PowerPC/rlwimi2.ll
@@ -0,0 +1,31 @@
+; All of these ands and shifts should be folded into rlwimi's
+; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -o %t -f
+; RUN: grep rlwimi %t | wc -l | grep 3
+; RUN: grep srwi %t | wc -l | grep 1
+; RUN: not grep slwi %t
+
+implementation ; Functions:
+
+ushort %test1(uint %srcA, uint %srcB, uint %alpha) {
+entry:
+ %tmp.1 = shl uint %srcA, ubyte 15 ; <uint> [#uses=1]
+ %tmp.4 = and uint %tmp.1, 32505856 ; <uint> [#uses=1]
+ %tmp.6 = and uint %srcA, 31775 ; <uint> [#uses=1]
+ %tmp.7 = or uint %tmp.4, %tmp.6 ; <uint> [#uses=1]
+ %tmp.9 = shl uint %srcB, ubyte 15 ; <uint> [#uses=1]
+ %tmp.12 = and uint %tmp.9, 32505856 ; <uint> [#uses=1]
+ %tmp.14 = and uint %srcB, 31775 ; <uint> [#uses=1]
+ %tmp.15 = or uint %tmp.12, %tmp.14 ; <uint> [#uses=1]
+ %tmp.18 = mul uint %tmp.7, %alpha ; <uint> [#uses=1]
+ %tmp.20 = sub uint 32, %alpha ; <uint> [#uses=1]
+ %tmp.22 = mul uint %tmp.15, %tmp.20 ; <uint> [#uses=1]
+ %tmp.23 = add uint %tmp.22, %tmp.18 ; <uint> [#uses=2]
+ %tmp.27 = shr uint %tmp.23, ubyte 5 ; <uint> [#uses=1]
+ %tmp.28 = cast uint %tmp.27 to ushort ; <ushort> [#uses=1]
+ %tmp.29 = and ushort %tmp.28, 31775 ; <ushort> [#uses=1]
+ %tmp.33 = shr uint %tmp.23, ubyte 20 ; <uint> [#uses=1]
+ %tmp.34 = cast uint %tmp.33 to ushort ; <ushort> [#uses=1]
+ %tmp.35 = and ushort %tmp.34, 992 ; <ushort> [#uses=1]
+ %tmp.36 = or ushort %tmp.29, %tmp.35 ; <ushort> [#uses=1]
+ ret ushort %tmp.36
+}
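
For readers unfamiliar with the target instruction: rlwimi (Rotate Left Word Immediate then Mask Insert) rotates a source register and inserts the bits selected by an immediate mask into the destination register, so shift/and/or combinations like the ones in the test can collapse into single instructions. A minimal, hypothetical sketch of this kind of pattern, written in later i32-style IR rather than the upgraded syntax used by the test:

; hypothetical example: blend the low 16 bits of %b into %a; the two ands
; use complementary masks, so the PowerPC backend can match the and/and/or
; as a single mask-insert (rlwimi)
define i32 @insert_low_half(i32 %a, i32 %b) {
entry:
  %hi  = and i32 %a, -65536      ; keep the upper halfword of %a
  %lo  = and i32 %b, 65535       ; take the lower halfword of %b
  %res = or i32 %hi, %lo         ; complementary masks => rlwimi candidate
  ret i32 %res
}

Whether a given combination actually folds depends on the backend's pattern matching; the RUN lines above check the expected instruction counts for this particular test.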