-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp     |  5
-rw-r--r--  lib/Target/X86/X86InstrInfo.td         | 12
-rw-r--r--  test/CodeGen/X86/select-i8.ll          | 12
-rw-r--r--  test/CodeGen/X86/subclass-coalesce.ll  | 17
4 files changed, 27 insertions, 19 deletions
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 72a2727..5fdd198 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -276,9 +276,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   setOperationAction(ISD::BSWAP           , MVT::i16  , Expand);
 
   // These should be promoted to a larger select which is supported.
-  setOperationAction(ISD::SELECT          , MVT::i1   , Promote);
-  setOperationAction(ISD::SELECT          , MVT::i8   , Promote);
+  setOperationAction(ISD::SELECT          , MVT::i1   , Promote);
   // X86 wants to expand cmov itself.
+  setOperationAction(ISD::SELECT          , MVT::i8   , Custom);
   setOperationAction(ISD::SELECT          , MVT::i16  , Custom);
   setOperationAction(ISD::SELECT          , MVT::i32  , Custom);
   setOperationAction(ISD::SELECT          , MVT::f32  , Custom);
@@ -7707,6 +7707,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
   switch (MI->getOpcode()) {
   default: assert(false && "Unexpected instr type to insert");
+  case X86::CMOV_GR8:
   case X86::CMOV_V1I64:
   case X86::CMOV_FR32:
   case X86::CMOV_FR64:
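
For context: every CMOV_* case in this switch, now including CMOV_GR8, is expanded by the
custom inserter into the usual branch diamond plus a PHI, since there is no real 8-bit cmov
to select. A rough sketch of the machine CFG it builds; the block names and the exact
branch/PHI operand wiring are illustrative, not the literal code in this function:

  thisMBB:
    ...                     ; EFLAGS already set by the preceding compare/test
    jCC  sinkMBB            ; branch opcode chosen from the $cond immediate
    ; fallthrough
  copy0MBB:                 ; reached when the condition is false
    ; empty -- exists only to provide the second PHI predecessor
    ; fallthrough
  sinkMBB:
    %dst = PHI [ FalseVal, copy0MBB ], [ TrueVal, thisMBB ]
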
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index f566127..15235d0 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -1035,6 +1035,18 @@ let isTwoAddress = 1 in {
 // Conditional moves
 let Uses = [EFLAGS] in {
 let isCommutable = 1 in {
+
+// X86 doesn't have 8-bit conditional moves. Use a customDAGSchedInserter to
+// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
+// however that requires promoting the operands, and can induce additional
+// i8 register pressure.
+let usesCustomDAGSchedInserter = 1 in
+def CMOV_GR8 : I<0, Pseudo,
+                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
+                 "#CMOV_GR8 PSEUDO!",
+                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
+                                          imm:$cond, EFLAGS))]>;
+
 def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
diff --git a/test/CodeGen/X86/select-i8.ll b/test/CodeGen/X86/select-i8.ll
new file mode 100644
index 0000000..8b2444d
--- /dev/null
+++ b/test/CodeGen/X86/select-i8.ll
@@ -0,0 +1,12 @@
+; RUN: llvm-as < %s | llc -march=x86 > %t
+; RUN: not grep movz %t
+; RUN: not grep cmov %t
+; RUN: grep movb %t | count 2
+
+; Don't try to use a 16-bit conditional move to do an 8-bit select,
+; because it isn't worth it. Just use a branch instead.
+
+define i8 @foo(i1 inreg %c, i8 inreg %a, i8 inreg %b) {
+  %d = select i1 %c, i8 %a, i8 %b
+  ret i8 %d
+}
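
The intent of the greps above: the select should become a compare-and-branch with plain byte
moves, not a zero-extend plus a 16-bit cmov. A hand-written illustration of the expected shape
(register choices, label names, and the branch direction are hypothetical, not captured llc
output; only the instruction mix matters for the test):

  foo:
          testb   $1, %al         ; i1 condition arrives inreg
          jne     .LBB1_2         ; condition true: keep the value already in place
          movb    %cl, %dl        ; condition false: use the other operand
  .LBB1_2:
          movb    %dl, %al        ; byte move into the return register
          ret
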
diff --git a/test/CodeGen/X86/subclass-coalesce.ll b/test/CodeGen/X86/subclass-coalesce.ll
deleted file mode 100644
index 2673be7..0000000
--- a/test/CodeGen/X86/subclass-coalesce.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llvm-as < %s | llc -march=x86 -stats |& grep {Number of cross class joins performed}
-
-@mem.6 = external global i64            ; <i64*> [#uses=1]
-
-define i64 @attachFunc() nounwind {
-entry:
-  %tmp64.i = add i64 0, 72                      ; <i64> [#uses=1]
-  %tmp68.i = load i64* @mem.6, align 8          ; <i64> [#uses=1]
-  %tmp70.i = icmp sgt i64 %tmp64.i, %tmp68.i    ; <i1> [#uses=1]
-  br i1 %tmp70.i, label %bb73.i, label %bb116
-
-bb73.i:         ; preds = %entry
-  br label %bb116
-
-bb116:          ; preds = %bb73.i, %entry
-  ret i64 %tmp68.i
-}