author     Bill Wendling <isanbard@gmail.com>    2008-12-12 00:56:36 +0000
committer  Bill Wendling <isanbard@gmail.com>    2008-12-12 00:56:36 +0000
commit     ab55ebda1c2254f98b06e770bc2dae7d05a4a366 (patch)
tree       770f22dcefcbafbbb4a2a91a7a195d7eca15ab47 /lib
parent     905315441ea40b068d58f5d9ff8328264af99e90 (diff)
Redo the arithmetic-with-overflow architecture. I was changing the semantics of
ISD::ADD to emit an implicit EFLAGS, which was horribly broken. Instead, replace the intrinsic with an ISD::SADDO node. Then custom lower that into an X86ISD::ADD node with an associated SETCC that checks the correct condition code (overflow or carry). That in turn gets lowered into the correct X86::ADDOvf instruction. The SUB and MUL instructions are handled similarly.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60915 91177308-0d34-0410-b5e6-96231b3b80d8
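To make the overflow-versus-carry distinction in the new lowering concrete: as the LowerXALUO hunk below shows, ISD::SADDO/SSUBO/SMULO select X86::COND_O (the signed-overflow flag, OF), while ISD::UADDO/USUBO/UMULO select X86::COND_C (the carry flag, CF). The snippet below is a small, self-contained C++ illustration of those two conditions for 32-bit addition; it is not LLVM code, just a sketch of the semantics the chosen SETCC condition codes check.

// Illustration only (not part of this commit): the two "overflow" notions
// that COND_O and COND_C distinguish after an ADD.
#include <cstdint>
#include <cstdio>
#include <limits>

// Signed add: overflow means the true result does not fit in int32_t.
// This is what the OF flag (X86::COND_O) reports.
static bool sadd_overflows(int32_t a, int32_t b) {
  int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  return wide > std::numeric_limits<int32_t>::max() ||
         wide < std::numeric_limits<int32_t>::min();
}

// Unsigned add: overflow means a carry out of bit 31.
// This is what the CF flag (X86::COND_C) reports.
static bool uadd_carries(uint32_t a, uint32_t b) {
  return static_cast<uint64_t>(a) + b > std::numeric_limits<uint32_t>::max();
}

int main() {
  std::printf("sadd 0x7fffffff + 1 overflows: %d\n",
              sadd_overflows(std::numeric_limits<int32_t>::max(), 1)); // 1, OF set
  std::printf("uadd 0x7fffffff + 1 carries:   %d\n",
              uadd_carries(0x7fffffffu, 1u));                          // 0, CF clear
  std::printf("uadd 0xffffffff + 1 carries:   %d\n",
              uadd_carries(0xffffffffu, 1u));                          // 1, CF set
  return 0;
}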
Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  15
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp       |  21
-rw-r--r--  lib/Target/X86/X86ISelLowering.h         |   6
-rw-r--r--  lib/Target/X86/X86Instr64bit.td          | 236
-rw-r--r--  lib/Target/X86/X86InstrInfo.td           | 544
5 files changed, 617 insertions(+), 205 deletions(-)
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 314697d..6538417 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -967,11 +967,6 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (FoldedVOp.getNode()) return FoldedVOp;
}
- if (N->getNumValues() != 1)
- // FIXME: DAG combiner cannot handle arithmetic operators which produce
- // multiple results.
- return SDValue();
-
// fold (add x, undef) -> undef
if (N0.getOpcode() == ISD::UNDEF)
return N0;
@@ -1167,11 +1162,6 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (FoldedVOp.getNode()) return FoldedVOp;
}
- if (N->getNumValues() != 1)
- // FIXME: DAG combiner cannot handle arithmetic operators which produce
- // multiple results.
- return SDValue();
-
// fold (sub x, x) -> 0
if (N0 == N1)
return DAG.getConstant(0, N->getValueType(0));
@@ -1230,11 +1220,6 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
if (FoldedVOp.getNode()) return FoldedVOp;
}
- if (N->getNumValues() != 1)
- // FIXME: DAG combiner cannot handle arithmetic operators which produce
- // multiple results.
- return SDValue();
-
// fold (mul x, undef) -> 0
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, VT);
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 091bc42..496112d 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -5210,9 +5210,9 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
if (Cond.getOpcode() == ISD::SETCC)
Cond = LowerSETCC(Cond, DAG);
- else if (Cond.getOpcode() == ISD::SADDO || Cond.getOpcode() == ISD::UADDO ||
- Cond.getOpcode() == ISD::SSUBO || Cond.getOpcode() == ISD::USUBO ||
- Cond.getOpcode() == ISD::SMULO || Cond.getOpcode() == ISD::UMULO)
+ else if (Cond.getOpcode() == X86ISD::ADD ||
+ Cond.getOpcode() == X86ISD::SUB ||
+ Cond.getOpcode() == X86ISD::MUL)
Cond = LowerXALUO(Cond, DAG);
// If condition flag is set by a X86ISD::CMP, then use it as the condition
@@ -6142,27 +6142,27 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Unknown ovf instruction!");
case ISD::SADDO:
- BaseOp = ISD::ADD;
+ BaseOp = X86ISD::ADD;
Cond = X86::COND_O;
break;
case ISD::UADDO:
- BaseOp = ISD::ADD;
+ BaseOp = X86ISD::ADD;
Cond = X86::COND_C;
break;
case ISD::SSUBO:
- BaseOp = ISD::SUB;
+ BaseOp = X86ISD::SUB;
Cond = X86::COND_O;
break;
case ISD::USUBO:
- BaseOp = ISD::SUB;
+ BaseOp = X86ISD::SUB;
Cond = X86::COND_C;
break;
case ISD::SMULO:
- BaseOp = ISD::MUL;
+ BaseOp = X86ISD::MUL;
Cond = X86::COND_O;
break;
case ISD::UMULO:
- BaseOp = ISD::MUL;
+ BaseOp = X86ISD::MUL;
Cond = X86::COND_C;
break;
}
@@ -6488,6 +6488,9 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::PCMPGTW: return "X86ISD::PCMPGTW";
case X86ISD::PCMPGTD: return "X86ISD::PCMPGTD";
case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ";
+ case X86ISD::ADD: return "X86ISD::ADD";
+ case X86ISD::SUB: return "X86ISD::SUB";
+ case X86ISD::MUL: return "X86ISD::MUL";
}
}
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 6eb78f6..c7602eb 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -227,7 +227,11 @@ namespace llvm {
// PCMP* - Vector integer comparisons.
PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
- PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ
+ PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,
+
+ // ADD, SUB, MUL - Arithmetic operations with overflow/carry
+ // intrinsics.
+ ADD, SUB, MUL
};
}
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index d6d08b9..49b3f3f 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -312,39 +312,76 @@ let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
-def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>;
+// Register-Register Addition
+def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;
-def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
-def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+// Register-Register Addition with Overflow
+def ADDOvf64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
+ [(set GR64:$dst, (X86add_ovf GR64:$src1, GR64:$src2)),
(implicit EFLAGS)]>;
+
+// Register-Integer Addition
+def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
+def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
+
+// Register-Integer Addition with Overflow
+def ADDOvf64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86add_ovf GR64:$src1, i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
+def ADDOvf64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86add_ovf GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
} // isConvertibleToThreeAddress
-def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+// Register-Memory Addition
+def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
+
+// Register-Memory Addition with Overflow
+def ADDOvf64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86add_ovf GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
} // isTwoAddress
+// Memory-Register Addition
def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+
+// Memory-Register Addition with Overflow
+def ADDOvf64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (load addr:$dst), GR64:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+def ADDOvf64mi32 : RIi32<0x81, MRM0m, (outs),(ins i64mem:$dst, i64i32imm:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (load addr:$dst),
+ i64immSExt32:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+def ADDOvf64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (load addr:$dst), i64immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
@@ -377,38 +414,86 @@ def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
} // Uses = [EFLAGS]
let isTwoAddress = 1 in {
+// Register-Register Subtraction
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
+
+// Register-Register Subtraction with Overflow
+def SUBOvf64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86sub_ovf GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>;
+// Register-Memory Subtraction
def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
+
+// Register-Memory Subtraction with Overflow
+def SUBOvf64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86sub_ovf GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
-def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+// Register-Integer Subtraction
+def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
+ (ins GR64:$src1, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
-def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
+def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
+ (ins GR64:$src1, i64i8imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
+
+// Register-Integer Subtraction with Overflow
+def SUBOvf64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
+ (ins GR64:$src1, i64i32imm:$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86sub_ovf GR64:$src1,
+ i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
+def SUBOvf64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
+ (ins GR64:$src1, i64i8imm:$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86sub_ovf GR64:$src1,
+ i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
} // isTwoAddress
+// Memory-Register Subtraction
def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
- (implicit EFLAGS)]>;
-def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
+ [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
+
+// Memory-Register Subtraction with Overflow
+def SUBOvf64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (load addr:$dst), GR64:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+
+// Memory-Integer Subtraction
+def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (sub (load addr:$dst), i64immSExt32:$src2),
+ addr:$dst)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (sub (load addr:$dst), i64immSExt8:$src2),
+ addr:$dst)]>;
+
+// Memory-Integer Subtraction with Overflow
+def SUBOvf64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst,i64i32imm:$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (load addr:$dst),
+ i64immSExt32:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
+def SUBOvf64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (load addr:$dst), i64immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
@@ -459,38 +544,85 @@ def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
-def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+// Register-Register Integer Multiplication
+def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>, TB;
-
-def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
+
+// Register-Register Multiplication with Overflow
+def IMULOvf64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2),
+ "imul{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86mul_ovf GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>, TB;
+
+// Register-Memory Integer Multiplication
+def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$src1, i64mem:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, TB;
+ [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
+
+// Register-Memory Integer Multiplication with Overflow
+def IMULOvf64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$src1, i64mem:$src2),
+ "imul{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86mul_ovf GR64:$src1,
+ (load addr:$src2))),
+ (implicit EFLAGS)]>, TB;
} // isTwoAddress
// Suprisingly enough, these are not two address instructions!
+
+// Register-Integer Integer Multiplication
def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
+
+// Register-Integer Integer Multiplication with Overflow
+def IMULOvf64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
+ (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, (X86mul_ovf GR64:$src1,
+ i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
+def IMULOvf64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
+ (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, (X86mul_ovf GR64:$src1,
+ i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
+
+// Memory-Integer Integer Multiplication
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
(outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, (mul (load addr:$src1),
+ i64immSExt32:$src2))]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
(outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, (mul (load addr:$src1),
+ i64immSExt8:$src2))]>;
+
+// Memory-Integer Integer Multiplication with Overflow
+def IMULOvf64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
+ (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, (X86mul_ovf (load addr:$src1),
+ i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
+def IMULOvf64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
+ (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, (X86mul_ovf (load addr:$src1),
+ i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 3834f84..ffbcb16 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -27,6 +27,10 @@ def SDTX86Cmov : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
+def SDTArithOvf : SDTypeProfile<1, 2,
+ [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisInt<0>]>;
+
def SDTX86BrCond : SDTypeProfile<0, 3,
[SDTCisVT<0, OtherVT>,
SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
@@ -140,6 +144,10 @@ def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
[SDNPHasChain, SDNPOptInFlag]>;
+def X86add_ovf : SDNode<"X86ISD::ADD", SDTArithOvf>;
+def X86sub_ovf : SDNode<"X86ISD::SUB", SDTArithOvf>;
+def X86mul_ovf : SDNode<"X86ISD::MUL", SDTArithOvf>;
+
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
//
@@ -1923,104 +1931,202 @@ let isTwoAddress = 0 in {
// Arithmetic.
let Defs = [EFLAGS] in {
let isCommutable = 1 in { // X = ADD Y, Z --> X = ADD Z, Y
-def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
- (ins GR8 :$src1, GR8 :$src2),
- "add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (add GR8:$src1, GR8:$src2)),
- (implicit EFLAGS)]>;
+// Register-Register Addition
+def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
+ (ins GR8 :$src1, GR8 :$src2),
+ "add{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (add GR8:$src1, GR8:$src2))]>;
+
+// Register-Register Addition with Overflow
+def ADDOvf8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
+ (ins GR8 :$src1, GR8 :$src2),
+ "add{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (X86add_ovf GR8:$src1, GR8:$src2)),
+ (implicit EFLAGS)]>;
+
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
+// Register-Register Addition
def ADD16rr : I<0x01, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (add GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, (add GR16:$src1, GR16:$src2))]>, OpSize;
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
+
+// Register-Register Addition with Overflow
+def ADDOvf16rr : I<0x01, MRMDestReg, (outs GR16:$dst),
+ (ins GR16:$src1, GR16:$src2),
+ "add{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86add_ovf GR16:$src1, GR16:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def ADDOvf32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86add_ovf GR32:$src1, GR32:$src2)),
+ (implicit EFLAGS)]>;
} // end isConvertibleToThreeAddress
} // end isCommutable
+
+// Register-Memory Addition
def ADD8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (add GR8:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, (add GR8:$src1, (load addr:$src2)))]>;
def ADD16rm : I<0x03, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (add GR16:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, (add GR16:$src1, (load addr:$src2)))]>,OpSize;
def ADD32rm : I<0x03, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, (add GR32:$src1, (load addr:$src2)))]>;
-def ADD8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+// Register-Memory Addition with Overflow
+def ADDOvf8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst),
+ (ins GR8 :$src1, i8mem :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (add GR8:$src1, imm:$src2)),
+ [(set GR8:$dst, (X86add_ovf GR8:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
+def ADDOvf16rm : I<0x03, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$src1, i16mem:$src2),
+ "add{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86add_ovf GR16:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>, OpSize;
+def ADDOvf32rm : I<0x03, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$src1, i32mem:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86add_ovf GR32:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
+
+// Register-Integer Addition
+def ADD8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ "add{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (add GR8:$src1, imm:$src2))]>;
+
+// Register-Integer Addition with Overflow
+def ADDOvf8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ "add{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (X86add_ovf GR8:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
+// Register-Integer Addition
def ADD16ri : Ii16<0x81, MRM0r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (add GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, (add GR16:$src1, imm:$src2))]>, OpSize;
def ADD32ri : Ii32<0x81, MRM0r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, (add GR32:$src1, imm:$src2))]>;
def ADD16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2))]>, OpSize;
def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2))]>;
+
+// Register-Integer Addition with Overflow
+def ADDOvf16ri : Ii16<0x81, MRM0r, (outs GR16:$dst),
+ (ins GR16:$src1, i16imm:$src2),
+ "add{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86add_ovf GR16:$src1, imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def ADDOvf32ri : Ii32<0x81, MRM0r, (outs GR32:$dst),
+ (ins GR32:$src1, i32imm:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86add_ovf GR32:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
+def ADDOvf16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst),
+ (ins GR16:$src1, i16i8imm:$src2),
+ "add{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86add_ovf GR16:$src1,
+ i16immSExt8:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def ADDOvf32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
+ (ins GR32:$src1, i32i8imm:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86add_ovf GR32:$src1,
+ i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
}
let isTwoAddress = 0 in {
+ // Memory-Register Addition
def ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), GR8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (add (load addr:$dst), GR8:$src2), addr:$dst)]>;
def ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), GR16:$src2), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
+ [(store (add (load addr:$dst), GR16:$src2), addr:$dst)]>,
+ OpSize;
def ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), GR32:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (add (load addr:$dst), GR32:$src2), addr:$dst)]>;
def ADD8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
- [(store (add (loadi8 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (add (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
def ADD16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(store (add (loadi16 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
+ [(store (add (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
+ OpSize;
def ADD32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(store (add (loadi32 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (add (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
def ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i16immSExt8:$src2), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
+ [(store (add (load addr:$dst), i16immSExt8:$src2),
+ addr:$dst)]>, OpSize;
def ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i32immSExt8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (add (load addr:$dst), i32immSExt8:$src2),
+ addr:$dst)]>;
+
+ // Memory-Register Addition with Overflow
+ def ADDOvf8mr : I<0x00, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
+ "add{b}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (load addr:$dst), GR8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+ def ADDOvf16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
+ "add{w}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (load addr:$dst), GR16:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
+ def ADDOvf32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (load addr:$dst), GR32:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+ def ADDOvf8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
+ "add{b}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (loadi8 addr:$dst), imm:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+ def ADDOvf16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
+ "add{w}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (loadi16 addr:$dst), imm:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
+ def ADDOvf32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (loadi32 addr:$dst), imm:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+ def ADDOvf16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
+ "add{w}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (load addr:$dst),i16immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
+ def ADDOvf32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86add_ovf (load addr:$dst),i32immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
}
let Uses = [EFLAGS] in {
@@ -2052,84 +2158,193 @@ let isTwoAddress = 0 in {
}
} // Uses = [EFLAGS]
-def SUB8rr : I<0x28, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
- "sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, GR8:$src2)),
- (implicit EFLAGS)]>;
-def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, OpSize;
-def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>;
-def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
- "sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
-def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, OpSize;
-def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
-
-def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+// Register-Register Subtraction
+def SUB8rr : I<0x28, MRMDestReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ "sub{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (sub GR8:$src1, GR8:$src2))]>;
+def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (sub GR16:$src1, GR16:$src2))]>, OpSize;
+def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (sub GR32:$src1, GR32:$src2))]>;
+
+// Register-Register Subtraction with Overflow
+def SUBOvf8rr : I<0x28, MRMDestReg, (outs GR8:$dst),
+ (ins GR8:$src1, GR8:$src2),
+ "sub{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (X86sub_ovf GR8:$src1, GR8:$src2)),
+ (implicit EFLAGS)]>;
+def SUBOvf16rr : I<0x29, MRMDestReg, (outs GR16:$dst),
+ (ins GR16:$src1, GR16:$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86sub_ovf GR16:$src1, GR16:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def SUBOvf32rr : I<0x29, MRMDestReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86sub_ovf GR32:$src1, GR32:$src2)),
+ (implicit EFLAGS)]>;
+
+// Register-Memory Subtraction
+def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst),
+ (ins GR8 :$src1, i8mem :$src2),
+ "sub{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2)))]>;
+def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$src1, i16mem:$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2)))]>, OpSize;
+def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$src1, i32mem:$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (sub GR32:$src1, (load addr:$src2)))]>;
+
+// Register-Memory Subtraction with Overflow
+def SUBOvf8rm : I<0x2A, MRMSrcMem, (outs GR8:$dst),
+ (ins GR8:$src1, i8mem:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, imm:$src2)),
+ [(set GR8:$dst, (X86sub_ovf GR8:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
+def SUBOvf16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$src1, i16mem:$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86sub_ovf GR16:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>, OpSize;
+def SUBOvf32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$src1, i32mem:$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86sub_ovf GR32:$src1, (load addr:$src2))),
(implicit EFLAGS)]>;
-def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+
+// Register-Integer Subtraction
+def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst),
+ (ins GR8:$src1, i8imm:$src2),
+ "sub{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (sub GR8:$src1, imm:$src2))]>;
+def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst),
+ (ins GR16:$src1, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
-def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ [(set GR16:$dst, (sub GR16:$src1, imm:$src2))]>, OpSize;
+def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst),
+ (ins GR32:$src1, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
-def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
+ [(set GR32:$dst, (sub GR32:$src1, imm:$src2))]>;
+def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst),
+ (ins GR16:$src1, i16i8imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
-def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
+ [(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2))]>,
+ OpSize;
+def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst),
+ (ins GR32:$src1, i32i8imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2))]>;
+
+// Register-Integer Subtraction with Overflow
+def SUBOvf8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst),
+ (ins GR8:$src1, i8imm:$src2),
+ "sub{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (X86sub_ovf GR8:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
+def SUBOvf16ri : Ii16<0x81, MRM5r, (outs GR16:$dst),
+ (ins GR16:$src1, i16imm:$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86sub_ovf GR16:$src1, imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def SUBOvf32ri : Ii32<0x81, MRM5r, (outs GR32:$dst),
+ (ins GR32:$src1, i32imm:$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86sub_ovf GR32:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
+def SUBOvf16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst),
+ (ins GR16:$src1, i16i8imm:$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86sub_ovf GR16:$src1,
+ i16immSExt8:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def SUBOvf32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst),
+ (ins GR32:$src1, i32i8imm:$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86sub_ovf GR32:$src1,
+ i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
+
let isTwoAddress = 0 in {
+ // Memory-Register Subtraction
def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (sub (load addr:$dst), GR8:$src2), addr:$dst)]>;
def SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR16:$src2), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
+ [(store (sub (load addr:$dst), GR16:$src2), addr:$dst)]>,
+ OpSize;
def SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR32:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (sub (load addr:$dst), GR32:$src2), addr:$dst)]>;
+
+ // Memory-Register Subtraction with Overflow
+ def SUBOvf8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
+ "sub{b}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (load addr:$dst), GR8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+ def SUBOvf16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (load addr:$dst), GR16:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
+ def SUBOvf32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (load addr:$dst), GR32:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+
+ // Memory-Integer Subtraction
def SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
def SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi16 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
+ [(store (sub (loadi16 addr:$dst), imm:$src2),addr:$dst)]>,
+ OpSize;
def SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi32 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (sub (loadi32 addr:$dst), imm:$src2),addr:$dst)]>;
def SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i16immSExt8:$src2), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
+ [(store (sub (load addr:$dst), i16immSExt8:$src2),
+ addr:$dst)]>, OpSize;
+ def SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i32immSExt8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
+ [(store (sub (load addr:$dst), i32immSExt8:$src2),
+ addr:$dst)]>;
+
+ // Memory-Integer Subtraction with Overflow
+ def SUBOvf8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
+ "sub{b}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (loadi8 addr:$dst), imm:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+ def SUBOvf16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (loadi16 addr:$dst), imm:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
+ def SUBOvf32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (loadi32 addr:$dst), imm:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+ def SUBOvf16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
+ "sub{w}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (load addr:$dst),i16immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
+ def SUBOvf32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
+ "sub{l}\t{$src2, $dst|$dst, $src2}",
+ [(store (X86sub_ovf (load addr:$dst),i32immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
}
let Uses = [EFLAGS] in {
@@ -2165,70 +2380,143 @@ def SBB32ri8 : Ii8<0x83, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src
let Defs = [EFLAGS] in {
let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
-def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+// Register-Register Integer Multiply
+def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, TB, OpSize;
-def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ [(set GR16:$dst, (mul GR16:$src1, GR16:$src2))]>, TB, OpSize;
+def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>, TB;
+ [(set GR32:$dst, (mul GR32:$src1, GR32:$src2))]>, TB;
+
+// Register-Register Integer Multiply
+def IMULOvf16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst),
+ (ins GR16:$src1, GR16:$src2),
+ "imul{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86mul_ovf GR16:$src1, GR16:$src2)),
+ (implicit EFLAGS)]>, TB, OpSize;
+def IMULOvf32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2),
+ "imul{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86mul_ovf GR32:$src1, GR32:$src2)),
+ (implicit EFLAGS)]>, TB;
}
-def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+
+// Register-Memory Integer Multiply
+def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$src1, i16mem:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>,
+ [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2)))]>,
TB, OpSize;
def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, TB;
+ [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2)))]>, TB;
+
+// Register-Memory Integer Multiply with Overflow
+def IMULOvf16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$src1, i16mem:$src2),
+ "imul{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86mul_ovf GR16:$src1,(load addr:$src2))),
+ (implicit EFLAGS)]>,
+ TB, OpSize;
+def IMULOvf32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$src1, i32mem:$src2),
+ "imul{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86mul_ovf GR32:$src1,(load addr:$src2))),
+ (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
} // end Two Address instructions
// Suprisingly enough, these are not two address instructions!
let Defs = [EFLAGS] in {
+// Register-Integer Integer Multiply
def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, (mul GR16:$src1, imm:$src2))]>, OpSize;
def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, (mul GR32:$src1, imm:$src2))]>;
def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2))]>,
+ OpSize;
def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
-
+ [(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2))]>;
+
+// Register-Integer Integer Multiply with Overflow
+def IMULOvf16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
+ (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR16:$dst, (X86mul_ovf GR16:$src1, imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def IMULOvf32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
+ (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, (X86mul_ovf GR32:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
+def IMULOvf16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
+ (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
+ "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR16:$dst, (X86mul_ovf GR16:$src1,
+ i16immSExt8:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def IMULOvf32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
+ (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
+ "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, (X86mul_ovf GR32:$src1,
+ i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
+
+// Memory-Integer Integer Multiply
def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
(outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul (load addr:$src1), imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, (mul (load addr:$src1), imm:$src2))]>,
+ OpSize;
def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
(outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul (load addr:$src1), imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, (mul (load addr:$src1), imm:$src2))]>;
def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
(outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul (load addr:$src1), i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, (mul (load addr:$src1),
+ i16immSExt8:$src2))]>, OpSize;
def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
(outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul (load addr:$src1), i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, (mul (load addr:$src1),
+ i32immSExt8:$src2))]>;
+
+// Memory-Integer Integer Multiply with Overflow
+def IMULOvf16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
+ (outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
+ "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR16:$dst, (X86mul_ovf (load addr:$src1),
+ imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def IMULOvf32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
+ (outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
+ "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, (X86mul_ovf (load addr:$src1),
+ imm:$src2)),
+ (implicit EFLAGS)]>;
+def IMULOvf16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
+ (outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
+ "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR16:$dst, (X86mul_ovf (load addr:$src1),
+ i16immSExt8:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def IMULOvf32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
+ (outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
+ "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, (X86mul_ovf (load addr:$src1),
+ i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//