diff options
author | Bill Wendling <isanbard@gmail.com> | 2008-12-12 00:56:36 +0000 |
---|---|---|
committer | Bill Wendling <isanbard@gmail.com> | 2008-12-12 00:56:36 +0000 |
commit | ab55ebda1c2254f98b06e770bc2dae7d05a4a366 (patch) | |
tree | 770f22dcefcbafbbb4a2a91a7a195d7eca15ab47 /lib/Target/X86/X86Instr64bit.td | |
parent | 905315441ea40b068d58f5d9ff8328264af99e90 (diff) | |
download | external_llvm-ab55ebda1c2254f98b06e770bc2dae7d05a4a366.zip external_llvm-ab55ebda1c2254f98b06e770bc2dae7d05a4a366.tar.gz external_llvm-ab55ebda1c2254f98b06e770bc2dae7d05a4a366.tar.bz2 |
Redo the arithmetic with overflow architecture. I was changing the semantics of
ISD::ADD to emit an implicit EFLAGS. This was horribly broken. Instead, replace
the intrinsic with an ISD::SADDO node. Then custom lower that into an
X86ISD::ADD node with an associated SETCC that checks the correct condition code
(overflow or carry). Then that gets lowered into the correct X86::ADDOvf
instruction.
Similarly for SUB and MUL instructions.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60915 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/X86/X86Instr64bit.td')
-rw-r--r-- | lib/Target/X86/X86Instr64bit.td | 236 |
1 files changed, 184 insertions, 52 deletions
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td index d6d08b9..49b3f3f 100644 --- a/lib/Target/X86/X86Instr64bit.td +++ b/lib/Target/X86/X86Instr64bit.td @@ -312,39 +312,76 @@ let Defs = [EFLAGS] in { let isTwoAddress = 1 in { let isConvertibleToThreeAddress = 1 in { let isCommutable = 1 in -def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "add{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (add GR64:$src1, GR64:$src2)), - (implicit EFLAGS)]>; +// Register-Register Addition +def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>; -def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), - "add{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)), - (implicit EFLAGS)]>; -def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +// Register-Register Addition with Overflow +def ADDOvf64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)), + [(set GR64:$dst, (X86add_ovf GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; + +// Register-Integer Addition +def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>; +def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>; + +// Register-Integer Addition with Overflow +def ADDOvf64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86add_ovf GR64:$src1, i64immSExt32:$src2)), + (implicit EFLAGS)]>; +def 
ADDOvf64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86add_ovf GR64:$src1, i64immSExt8:$src2)), + (implicit EFLAGS)]>; } // isConvertibleToThreeAddress -def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "add{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))), - (implicit EFLAGS)]>; +// Register-Memory Addition +def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>; + +// Register-Memory Addition with Overflow +def ADDOvf64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86add_ovf GR64:$src1, (load addr:$src2))), + (implicit EFLAGS)]>; } // isTwoAddress +// Memory-Register Addition def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", - [(store (add (load addr:$dst), GR64:$src2), addr:$dst), - (implicit EFLAGS)]>; + [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>; def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2), "add{q}\t{$src2, $dst|$dst, $src2}", - [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst), - (implicit EFLAGS)]>; + [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>; def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2), "add{q}\t{$src2, $dst|$dst, $src2}", - [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst), - (implicit EFLAGS)]>; + [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>; + +// Memory-Register Addition with Overflow +def ADDOvf64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(store (X86add_ovf (load addr:$dst), GR64:$src2), + addr:$dst), + 
(implicit EFLAGS)]>; +def ADDOvf64mi32 : RIi32<0x81, MRM0m, (outs),(ins i64mem:$dst, i64i32imm:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(store (X86add_ovf (load addr:$dst), + i64immSExt32:$src2), + addr:$dst), + (implicit EFLAGS)]>; +def ADDOvf64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(store (X86add_ovf (load addr:$dst), i64immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)]>; let Uses = [EFLAGS] in { let isTwoAddress = 1 in { @@ -377,38 +414,86 @@ def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2), } // Uses = [EFLAGS] let isTwoAddress = 1 in { +// Register-Register Subtraction def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)), - (implicit EFLAGS)]>; + [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>; + +// Register-Register Subtraction with Overflow +def SUBOvf64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86sub_ovf GR64:$src1, GR64:$src2)), + (implicit EFLAGS)]>; +// Register-Memory Subtraction def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))), - (implicit EFLAGS)]>; + [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>; + +// Register-Memory Subtraction with Overflow +def SUBOvf64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86sub_ovf GR64:$src1, (load addr:$src2))), + (implicit EFLAGS)]>; -def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +// Register-Integer Subtraction +def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", - 
[(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)), - (implicit EFLAGS)]>; -def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), + [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>; +def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)), - (implicit EFLAGS)]>; + [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>; + +// Register-Integer Subtraction with Overflow +def SUBOvf64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86sub_ovf GR64:$src1, + i64immSExt32:$src2)), + (implicit EFLAGS)]>; +def SUBOvf64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86sub_ovf GR64:$src1, + i64immSExt8:$src2)), + (implicit EFLAGS)]>; } // isTwoAddress +// Memory-Register Subtraction def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", - [(store (sub (load addr:$dst), GR64:$src2), addr:$dst), - (implicit EFLAGS)]>; -def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2), + [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>; + +// Memory-Register Subtraction with Overflow +def SUBOvf64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", + [(store (X86sub_ovf (load addr:$dst), GR64:$src2), + addr:$dst), + (implicit EFLAGS)]>; + +// Memory-Integer Subtraction +def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", - [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst), - (implicit EFLAGS)]>; + [(store (sub (load addr:$dst), i64immSExt32:$src2), + addr:$dst)]>; def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm 
:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", - [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst), - (implicit EFLAGS)]>; + [(store (sub (load addr:$dst), i64immSExt8:$src2), + addr:$dst)]>; + +// Memory-Integer Subtraction with Overflow +def SUBOvf64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst,i64i32imm:$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", + [(store (X86sub_ovf (load addr:$dst), + i64immSExt32:$src2), addr:$dst), + (implicit EFLAGS)]>; +def SUBOvf64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", + [(store (X86sub_ovf (load addr:$dst), i64immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)]>; let Uses = [EFLAGS] in { let isTwoAddress = 1 in { @@ -459,38 +544,85 @@ def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src), let Defs = [EFLAGS] in { let isTwoAddress = 1 in { let isCommutable = 1 in -def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +// Register-Register Integer Multiplication +def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "imul{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)), - (implicit EFLAGS)]>, TB; - -def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), + [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB; + +// Register-Register Multiplication with Overflow +def IMULOvf64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "imul{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86mul_ovf GR64:$src1, GR64:$src2)), + (implicit EFLAGS)]>, TB; + +// Register-Memory Integer Multiplication +def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "imul{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))), - (implicit EFLAGS)]>, TB; + [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB; + +// Register-Memory Integer Multiplication with 
Overflow +def IMULOvf64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), + "imul{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (X86mul_ovf GR64:$src1, + (load addr:$src2))), + (implicit EFLAGS)]>, TB; } // isTwoAddress // Suprisingly enough, these are not two address instructions! + +// Register-Integer Integer Multiplication def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", - [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)), - (implicit EFLAGS)]>; + [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>; def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", - [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)), - (implicit EFLAGS)]>; + [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>; + +// Register-Integer Integer Multiplication with Overflow +def IMULOvf64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32 + (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), + "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", + [(set GR64:$dst, (X86mul_ovf GR64:$src1, + i64immSExt32:$src2)), + (implicit EFLAGS)]>; +def IMULOvf64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8 + (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), + "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", + [(set GR64:$dst, (X86mul_ovf GR64:$src1, + i64immSExt8:$src2)), + (implicit EFLAGS)]>; + +// Memory-Integer Integer Multiplication def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2), "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", - [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2)), - (implicit EFLAGS)]>; + [(set GR64:$dst, (mul (load addr:$src1), + i64immSExt32:$src2))]>; def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8 (outs GR64:$dst), 
(ins i64mem:$src1, i64i8imm: $src2), "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", - [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2)), - (implicit EFLAGS)]>; + [(set GR64:$dst, (mul (load addr:$src1), + i64immSExt8:$src2))]>; + +// Memory-Integer Integer Multiplication with Overflow +def IMULOvf64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32 + (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2), + "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", + [(set GR64:$dst, (X86mul_ovf (load addr:$src1), + i64immSExt32:$src2)), + (implicit EFLAGS)]>; +def IMULOvf64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8 + (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2), + "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", + [(set GR64:$dst, (X86mul_ovf (load addr:$src1), + i64immSExt8:$src2)), + (implicit EFLAGS)]>; } // Defs = [EFLAGS] // Unsigned division / remainder |