path: root/test/CodeGen/X86/xor-icmp.ll
author     Evan Cheng <evan.cheng@apple.com>  2010-01-06 19:38:29 +0000
committer  Evan Cheng <evan.cheng@apple.com>  2010-01-06 19:38:29 +0000
commit     d40d03e1bd1d51857fc2f9f9230e334c3a32b249 (patch)
tree       cc28a8f045a22783624b225433ca137ab6a815d9  /test/CodeGen/X86/xor-icmp.ll
parent     b419a5cfe9fae934d6c05da173934673f5402c99 (diff)
download   external_llvm-d40d03e1bd1d51857fc2f9f9230e334c3a32b249.zip
external_llvm-d40d03e1bd1d51857fc2f9f9230e334c3a32b249.tar.gz
external_llvm-d40d03e1bd1d51857fc2f9f9230e334c3a32b249.tar.bz2
Teach dag combine to apply the following transformation more aggressively:

(OP (trunc x), (trunc y)) -> (trunc (OP x, y))

Unfortunately, this simple change sends dag combine into an infinite loop, because the shrink-demanded-ops optimization tends to canonicalize expressions in the opposite direction. This patch disables those optimizations in dag combine and instead performs them as a late pass in sdisel. The change also exposes some deficiencies in dag combine and in x86 setcc / brcond lowering; teach them to look past ISD::TRUNCATE in various places.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@92849 91177308-0d34-0410-b5e6-96231b3b80d8
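As a rough sketch of the fold with xor as the operation (the %x/%y names and the i64/i32 types here are illustrative only, and the actual transform operates on SelectionDAG nodes rather than LLVM IR):

  ; before: truncate both inputs, then operate on the narrow values
  %tx = trunc i64 %x to i32
  %ty = trunc i64 %y to i32
  %r0 = xor i32 %tx, %ty

  ; after: operate on the wide values, truncate the result once
  %w  = xor i64 %x, %y
  %r1 = trunc i64 %w to i32

Pushing the truncate below the operation leaves a single trunc and lets later combines see the wide xor, which is what the test below relies on.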
Diffstat (limited to 'test/CodeGen/X86/xor-icmp.ll')
-rw-r--r--  test/CodeGen/X86/xor-icmp.ll  36
1 file changed, 36 insertions, 0 deletions
diff --git a/test/CodeGen/X86/xor-icmp.ll b/test/CodeGen/X86/xor-icmp.ll
new file mode 100644
index 0000000..a6bdb13
--- /dev/null
+++ b/test/CodeGen/X86/xor-icmp.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X64
+
+define i32 @t(i32 %a, i32 %b) nounwind ssp {
+entry:
+; X32: t:
+; X32: xorb
+; X32-NOT: andb
+; X32-NOT: shrb
+; X32: testb $64
+; X32: jne
+
+; X64: t:
+; X64-NOT: setne
+; X64: xorl
+; X64: testb $64
+; X64: jne
+ %0 = and i32 %a, 16384
+ %1 = icmp ne i32 %0, 0
+ %2 = and i32 %b, 16384
+ %3 = icmp ne i32 %2, 0
+ %4 = xor i1 %1, %3
+ br i1 %4, label %bb1, label %bb
+
+bb: ; preds = %entry
+ %5 = tail call i32 (...)* @foo() nounwind ; <i32> [#uses=1]
+ ret i32 %5
+
+bb1: ; preds = %entry
+ %6 = tail call i32 (...)* @bar() nounwind ; <i32> [#uses=1]
+ ret i32 %6
+}
+
+declare i32 @foo(...)
+
+declare i32 @bar(...)