path: root/test/CodeGen/X86/narrow-shl-cst.ll
author    Benjamin Kramer <benny.kra@googlemail.com>  2011-04-22 15:30:40 +0000
committer Benjamin Kramer <benny.kra@googlemail.com>  2011-04-22 15:30:40 +0000
commit    b20a8fc8a6bf57dbde0e9238cf535abb4326dc80 (patch)
tree      5ae0148f9eb1ca78f9934cdc6a0d618f89a8946c /test/CodeGen/X86/narrow-shl-cst.ll
parent    eab631362d676c0113e052cc7e877eef4da544b8 (diff)
X86: Try to use a smaller encoding by transforming (X << C1) & C2 into (X & (C2 >> C1)) << C1. (Part of PR5039)
This tends to happen a lot with bitfield code generated by clang. A simple example for x86_64 is

  uint64_t foo(uint64_t x) { return (x&1) << 42; }

which used to compile into bloated code:

  shlq    $42, %rdi               ## encoding: [0x48,0xc1,0xe7,0x2a]
  movabsq $4398046511104, %rax    ## encoding: [0x48,0xb8,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00]
  andq    %rdi, %rax              ## encoding: [0x48,0x21,0xf8]
  ret                             ## encoding: [0xc3]

with this patch we can fold the immediate into the and:

  andq    $1, %rdi                ## encoding: [0x48,0x83,0xe7,0x01]
  movq    %rdi, %rax              ## encoding: [0x48,0x89,0xf8]
  shlq    $42, %rax               ## encoding: [0x48,0xc1,0xe0,0x2a]
  ret                             ## encoding: [0xc3]

It's possible to save another byte by using 'andl' instead of 'andq', but I currently see no way of doing that without making this code even more complicated. See the TODOs in the code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129990 91177308-0d34-0410-b5e6-96231b3b80d8
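Editor's note (not part of the original commit): the rewrite relies on the identity (X << C1) & C2 == (X & (C2 >> C1)) << C1, which holds for any C2 because the low C1 bits of X << C1 are already zero; for 'or' and 'xor' the analogous rewrite needs the low C1 bits of C2 to be zero. Below is a minimal C sanity check of that identity; the helper name check_and_identity and the sample values are illustrative only, not code from the patch.

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical helper: checks (x << c1) & c2 == ((x & (c2 >> c1)) << c1)
   * for 64-bit values, i.e. the identity the narrowing transform relies on. */
  static void check_and_identity(uint64_t x, unsigned c1, uint64_t c2) {
      uint64_t lhs = (x << c1) & c2;
      uint64_t rhs = (x & (c2 >> c1)) << c1;
      assert(lhs == rhs);
  }

  int main(void) {
      /* Constants taken from the commit message example ((x&1) << 42)
       * and from test4 in the new test file. */
      uint64_t samples[] = {0, 1, 0xDEADBEEFULL, 0xFFFFFFFFFFFFFFFFULL};
      for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
          check_and_identity(samples[i], 42, 4398046511104ULL);
          check_and_identity(samples[i], 40, 264982302294016ULL);
      }
      printf("identity holds for all samples\n");
      return 0;
  }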
Diffstat (limited to 'test/CodeGen/X86/narrow-shl-cst.ll')
-rw-r--r--  test/CodeGen/X86/narrow-shl-cst.ll | 101
1 files changed, 101 insertions, 0 deletions
diff --git a/test/CodeGen/X86/narrow-shl-cst.ll b/test/CodeGen/X86/narrow-shl-cst.ll
new file mode 100644
index 0000000..a404f34
--- /dev/null
+++ b/test/CodeGen/X86/narrow-shl-cst.ll
@@ -0,0 +1,101 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+; PR5039
+
+define i32 @test1(i32 %x) nounwind {
+ %and = shl i32 %x, 10
+ %shl = and i32 %and, 31744
+ ret i32 %shl
+; CHECK: test1:
+; CHECK: andl $31
+; CHECK: shll $10
+}
+
+define i32 @test2(i32 %x) nounwind {
+ %or = shl i32 %x, 10
+ %shl = or i32 %or, 31744
+ ret i32 %shl
+; CHECK: test2:
+; CHECK: orl $31
+; CHECK: shll $10
+}
+
+define i32 @test3(i32 %x) nounwind {
+ %xor = shl i32 %x, 10
+ %shl = xor i32 %xor, 31744
+ ret i32 %shl
+; CHECK: test3:
+; CHECK: xorl $31
+; CHECK: shll $10
+}
+
+define i64 @test4(i64 %x) nounwind {
+ %and = shl i64 %x, 40
+ %shl = and i64 %and, 264982302294016
+ ret i64 %shl
+; CHECK: test4:
+; CHECK: andq $241
+; CHECK: shlq $40
+}
+
+define i64 @test5(i64 %x) nounwind {
+ %and = shl i64 %x, 40
+ %shl = and i64 %and, 34084860461056
+ ret i64 %shl
+; CHECK: test5:
+; CHECK: andq $31
+; CHECK: shlq $40
+}
+
+define i64 @test6(i64 %x) nounwind {
+ %and = shl i64 %x, 32
+ %shl = and i64 %and, -281474976710656
+ ret i64 %shl
+; CHECK: test6:
+; CHECK: andq $-65536
+; CHECK: shlq $32
+}
+
+define i64 @test7(i64 %x) nounwind {
+ %or = shl i64 %x, 40
+ %shl = or i64 %or, 264982302294016
+ ret i64 %shl
+; CHECK: test7:
+; CHECK: orq $241
+; CHECK: shlq $40
+}
+
+define i64 @test8(i64 %x) nounwind {
+ %or = shl i64 %x, 40
+ %shl = or i64 %or, 34084860461056
+ ret i64 %shl
+; CHECK: test8:
+; CHECK: orq $31
+; CHECK: shlq $40
+}
+
+define i64 @test9(i64 %x) nounwind {
+ %xor = shl i64 %x, 40
+ %shl = xor i64 %xor, 264982302294016
+ ret i64 %shl
+; CHECK: test9:
+; CHECK: orq $241
+; CHECK: shlq $40
+}
+
+define i64 @test10(i64 %x) nounwind {
+ %xor = shl i64 %x, 40
+ %shl = xor i64 %xor, 34084860461056
+ ret i64 %shl
+; CHECK: test10:
+; CHECK: xorq $31
+; CHECK: shlq $40
+}
+
+define i64 @test11(i64 %x) nounwind {
+ %xor = shl i64 %x, 33
+ %shl = xor i64 %xor, -562949953421312
+ ret i64 %shl
+; CHECK: test11:
+; CHECK: xorq $-65536
+; CHECK: shlq $33
+}