author     Steve Block <steveblock@google.com>    2011-05-25 19:08:45 +0100
committer  Steve Block <steveblock@google.com>    2011-06-08 13:51:31 +0100
commit     2bde8e466a4451c7319e3a072d118917957d6554 (patch)
tree       28f4a1b869a513e565c7760d0e6a06e7cf1fe95a /Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
parent     6939c99b71d9372d14a0c74a772108052e8c48c8 (diff)
Merge WebKit at r82507: Initial merge by git
Change-Id: I60ce9d780725b58b45e54165733a8ffee23b683e
Diffstat (limited to 'Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h')
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h  385
1 file changed, 282 insertions(+), 103 deletions(-)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index a02074c..f5829dd 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -83,7 +83,7 @@ public:
     // Integer arithmetic operations:
     //
     // Operations are typically two operand - operation(source, srcDst)
-    // For many operations the source may be an Imm32, the srcDst operand
+    // For many operations the source may be an TrustedImm32, the srcDst operand
     // may often be a memory location (explictly described using an Address
     // object).
@@ -92,12 +92,12 @@ public:
         m_assembler.addl_rr(src, dest);
     }

-    void add32(Imm32 imm, Address address)
+    void add32(TrustedImm32 imm, Address address)
     {
         m_assembler.addl_im(imm.m_value, address.offset, address.base);
     }

-    void add32(Imm32 imm, RegisterID dest)
+    void add32(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.addl_ir(imm.m_value, dest);
     }
@@ -117,7 +117,7 @@ public:
         m_assembler.andl_rr(src, dest);
     }

-    void and32(Imm32 imm, RegisterID dest)
+    void and32(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.andl_ir(imm.m_value, dest);
     }
@@ -132,36 +132,64 @@ public:
         m_assembler.andl_mr(src.offset, src.base, dest);
     }

-    void and32(Imm32 imm, Address address)
+    void and32(TrustedImm32 imm, Address address)
     {
         m_assembler.andl_im(imm.m_value, address.offset, address.base);
     }

-    void lshift32(Imm32 imm, RegisterID dest)
+    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
     {
-        m_assembler.shll_i8r(imm.m_value, dest);
+        if (op1 == op2)
+            zeroExtend32ToPtr(op1, dest);
+        else if (op1 == dest)
+            and32(op2, dest);
+        else {
+            move(op2, dest);
+            and32(op1, dest);
+        }
     }
-
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+        and32(imm, dest);
+    }
+
     void lshift32(RegisterID shift_amount, RegisterID dest)
     {
-        // On x86 we can only shift by ecx; if asked to shift by another register we'll
-        // need rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86Registers::ecx) {
-            swap(shift_amount, X86Registers::ecx);
+        ASSERT(shift_amount != dest);

-            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
-            if (dest == shift_amount)
-                m_assembler.shll_CLr(X86Registers::ecx);
-            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86Registers::ecx)
-                m_assembler.shll_CLr(shift_amount);
-            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
-            else
-                m_assembler.shll_CLr(dest);
-
-            swap(shift_amount, X86Registers::ecx);
-        } else
+        if (shift_amount == X86Registers::ecx)
             m_assembler.shll_CLr(dest);
+        else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If we dest is ecx, then shift the swapped register!
+            swap(shift_amount, X86Registers::ecx);
+            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+            swap(shift_amount, X86Registers::ecx);
+        }
+    }
+
+    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+    {
+        ASSERT(shift_amount != dest);
+
+        if (src != dest)
+            move(src, dest);
+        lshift32(shift_amount, dest);
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shll_i8r(imm.m_value, dest);
+    }
+
+    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+        lshift32(imm, dest);
     }

     void mul32(RegisterID src, RegisterID dest)
@@ -174,7 +202,7 @@ public:
         m_assembler.imull_mr(src.offset, src.base, dest);
     }

-    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
         m_assembler.imull_i32r(src, imm.m_value, dest);
     }
@@ -204,7 +232,7 @@ public:
         m_assembler.orl_rr(src, dest);
     }

-    void or32(Imm32 imm, RegisterID dest)
+    void or32(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.orl_ir(imm.m_value, dest);
     }
@@ -219,76 +247,114 @@ public:
         m_assembler.orl_mr(src.offset, src.base, dest);
     }

-    void or32(Imm32 imm, Address address)
+    void or32(TrustedImm32 imm, Address address)
     {
         m_assembler.orl_im(imm.m_value, address.offset, address.base);
     }

+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            zeroExtend32ToPtr(op1, dest);
+        else if (op1 == dest)
+            or32(op2, dest);
+        else {
+            move(op2, dest);
+            or32(op1, dest);
+        }
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+        or32(imm, dest);
+    }
+
     void rshift32(RegisterID shift_amount, RegisterID dest)
     {
-        // On x86 we can only shift by ecx; if asked to shift by another register we'll
-        // need rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86Registers::ecx) {
-            swap(shift_amount, X86Registers::ecx);
+        ASSERT(shift_amount != dest);

-            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
-            if (dest == shift_amount)
-                m_assembler.sarl_CLr(X86Registers::ecx);
-            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86Registers::ecx)
-                m_assembler.sarl_CLr(shift_amount);
-            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
-            else
-                m_assembler.sarl_CLr(dest);
-
-            swap(shift_amount, X86Registers::ecx);
-        } else
+        if (shift_amount == X86Registers::ecx)
             m_assembler.sarl_CLr(dest);
+        else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If we dest is ecx, then shift the swapped register!
+            swap(shift_amount, X86Registers::ecx);
+            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+            swap(shift_amount, X86Registers::ecx);
+        }
+    }
+
+    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+    {
+        ASSERT(shift_amount != dest);
+
+        if (src != dest)
+            move(src, dest);
+        rshift32(shift_amount, dest);
     }

-    void rshift32(Imm32 imm, RegisterID dest)
+    void rshift32(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.sarl_i8r(imm.m_value, dest);
     }

+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+        rshift32(imm, dest);
+    }
+
     void urshift32(RegisterID shift_amount, RegisterID dest)
     {
-        // On x86 we can only shift by ecx; if asked to shift by another register we'll
-        // need rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86Registers::ecx) {
+        ASSERT(shift_amount != dest);
+
+        if (shift_amount == X86Registers::ecx)
+            m_assembler.shrl_CLr(dest);
+        else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If we dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
-
-            // E.g. transform "shrl %eax, %eax" -> "xchgl %eax, %ecx; shrl %ecx, %ecx; xchgl %eax, %ecx"
-            if (dest == shift_amount)
-                m_assembler.shrl_CLr(X86Registers::ecx);
-            // E.g. transform "shrl %eax, %ecx" -> "xchgl %eax, %ecx; shrl %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86Registers::ecx)
-                m_assembler.shrl_CLr(shift_amount);
-            // E.g. transform "shrl %eax, %ebx" -> "xchgl %eax, %ecx; shrl %ecx, %ebx; xchgl %eax, %ecx"
-            else
-                m_assembler.shrl_CLr(dest);
-
+            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
-        } else
-            m_assembler.shrl_CLr(dest);
+        }
     }
-
-    void urshift32(Imm32 imm, RegisterID dest)
+
+    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
     {
-        m_assembler.shrl_i8r(imm.m_value, dest);
+        ASSERT(shift_amount != dest);
+
+        if (src != dest)
+            move(src, dest);
+        urshift32(shift_amount, dest);
     }

+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shrl_i8r(imm.m_value, dest);
+    }
+
+    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+        urshift32(imm, dest);
+    }
+
     void sub32(RegisterID src, RegisterID dest)
     {
         m_assembler.subl_rr(src, dest);
     }

-    void sub32(Imm32 imm, RegisterID dest)
+    void sub32(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.subl_ir(imm.m_value, dest);
     }

-    void sub32(Imm32 imm, Address address)
+    void sub32(TrustedImm32 imm, Address address)
     {
         m_assembler.subl_im(imm.m_value, address.offset, address.base);
     }
@@ -309,12 +375,12 @@ public:
         m_assembler.xorl_rr(src, dest);
     }

-    void xor32(Imm32 imm, Address dest)
+    void xor32(TrustedImm32 imm, Address dest)
     {
         m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
     }

-    void xor32(Imm32 imm, RegisterID dest)
+    void xor32(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.xorl_ir(imm.m_value, dest);
     }
@@ -329,6 +395,24 @@ public:
         m_assembler.xorl_mr(src.offset, src.base, dest);
     }

+    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            move(TrustedImm32(0), dest);
+        else if (op1 == dest)
+            xor32(op2, dest);
+        else {
+            move(op2, dest);
+            xor32(op1, dest);
+        }
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+        xor32(imm, dest);
+    }
+
     void sqrtDouble(FPRegisterID src, FPRegisterID dst)
     {
         m_assembler.sqrtsd_rr(src, dst);
     }
@@ -337,7 +421,7 @@ public:
     // Memory access operations:
     //
     // Loads are of the form load(address, destination) and stores of the form
-    // store(source, address). The source for a store may be an Imm32. Address
+    // store(source, address). The source for a store may be an TrustedImm32. Address
     // operand objects to loads and store will be implicitly constructed if a
     // register is passed.
@@ -388,7 +472,7 @@ public:
         m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
     }

-    void store32(Imm32 imm, ImplicitAddress address)
+    void store32(TrustedImm32 imm, ImplicitAddress address)
     {
         m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
     }
@@ -398,6 +482,13 @@ public:
     //
     // Presently only supports SSE, not x87 floating point.

+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        if (src != dest)
+            m_assembler.movsd_rr(src, dest);
+    }
+
     void loadDouble(ImplicitAddress address, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
@@ -416,6 +507,17 @@ public:
         m_assembler.addsd_rr(src, dest);
     }

+    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        if (op1 == dest)
+            addDouble(op2, dest);
+        else {
+            moveDouble(op2, dest);
+            addDouble(op1, dest);
+        }
+    }
+
     void addDouble(Address src, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
@@ -428,6 +530,15 @@ public:
         m_assembler.divsd_rr(src, dest);
     }

+    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        // B := A / B is invalid.
+        ASSERT(op1 == dest || op2 != dest);
+
+        moveDouble(op1, dest);
+        divDouble(op2, dest);
+    }
+
     void divDouble(Address src, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
@@ -440,6 +551,15 @@ public:
         m_assembler.subsd_rr(src, dest);
     }

+    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        // B := A - B is invalid.
+        ASSERT(op1 == dest || op2 != dest);
+
+        moveDouble(op1, dest);
+        subDouble(op2, dest);
+    }
+
     void subDouble(Address src, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
@@ -452,6 +572,17 @@ public:
         m_assembler.mulsd_rr(src, dest);
     }

+    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        if (op1 == dest)
+            mulDouble(op2, dest);
+        else {
+            moveDouble(op2, dest);
+            mulDouble(op1, dest);
+        }
+    }
+
     void mulDouble(Address src, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
@@ -501,11 +632,12 @@ public:
     // If the result is not representable as a 32 bit value, branch.
     // May also branch for some values that are representable in 32 bits
     // (specifically, in this case, INT_MIN).
-    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
     {
         ASSERT(isSSE2Present());
         m_assembler.cvttsd2si_rr(src, dest);
-        return branch32(Equal, dest, Imm32(0x80000000));
+        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
     }

     // Convert 'src' to an integer, and places the resulting 'dest'.
@@ -564,7 +696,7 @@ public:
         m_assembler.push_m(address.offset, address.base);
     }

-    void push(Imm32 imm)
+    void push(TrustedImm32 imm)
     {
         m_assembler.push_i32(imm.m_value);
     }
@@ -574,9 +706,9 @@ public:
     //
     // Move values in registers.

-    void move(Imm32 imm, RegisterID dest)
+    void move(TrustedImm32 imm, RegisterID dest)
     {
-        // Note: on 64-bit the Imm32 value is zero extended into the register, it
+        // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
         // may be useful to have a separate version that sign extends the value?
         if (!imm.m_value)
             m_assembler.xorl_rr(dest, dest);
@@ -593,7 +725,7 @@ public:
         m_assembler.movq_rr(src, dest);
     }

-    void move(ImmPtr imm, RegisterID dest)
+    void move(TrustedImmPtr imm, RegisterID dest)
     {
         m_assembler.movq_i64r(imm.asIntptr(), dest);
     }
@@ -620,7 +752,7 @@ public:
         m_assembler.movl_rr(src, dest);
     }

-    void move(ImmPtr imm, RegisterID dest)
+    void move(TrustedImmPtr imm, RegisterID dest)
     {
         m_assembler.movl_i32r(imm.asIntptr(), dest);
     }
@@ -655,14 +787,14 @@ public:
     // used (representing the names 'below' and 'above').
     //
     // Operands to the comparision are provided in the expected order, e.g.
-    // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
     // treated as a signed 32bit value, is less than or equal to 5.
     //
     // jz and jnz test whether the first operand is equal to zero, and take
     // an optional second operand of a mask under which to perform the test.
 public:
-    Jump branch8(Condition cond, Address left, Imm32 right)
+    Jump branch8(Condition cond, Address left, TrustedImm32 right)
     {
         m_assembler.cmpb_im(right.m_value, left.offset, left.base);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
@@ -674,7 +806,7 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branch32(Condition cond, RegisterID left, Imm32 right)
+    Jump branch32(Condition cond, RegisterID left, TrustedImm32 right)
     {
         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
             m_assembler.testl_rr(left, left);
@@ -695,19 +827,19 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branch32(Condition cond, Address left, Imm32 right)
+    Jump branch32(Condition cond, Address left, TrustedImm32 right)
     {
         m_assembler.cmpl_im(right.m_value, left.offset, left.base);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+    Jump branch32(Condition cond, BaseIndex left, TrustedImm32 right)
     {
         m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, TrustedImm32 right)
     {
         return branch32(cond, left, right);
     }
@@ -718,7 +850,7 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+    Jump branch16(Condition cond, BaseIndex left, TrustedImm32 right)
     {
         ASSERT(!(right.m_value & 0xFFFF0000));
@@ -728,14 +860,14 @@ public:
     Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
         m_assembler.testl_rr(reg, mask);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+    Jump branchTest32(Condition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
         // if we are only interested in the low seven bits, this can be tested with a testb
         if (mask.m_value == -1)
             m_assembler.testl_rr(reg, reg);
@@ -746,9 +878,9 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+    Jump branchTest32(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
         if (mask.m_value == -1)
             m_assembler.cmpl_im(0, address.offset, address.base);
         else
@@ -756,9 +888,9 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+    Jump branchTest32(Condition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
         if (mask.m_value == -1)
             m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
         else
@@ -766,9 +898,23 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
+    Jump branchTest8(Condition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        // Byte in TrustedImm32 is not well defined, so be a little permisive here, but don't accept nonsense values.
+        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
+        if (mask.m_value == -1)
+            m_assembler.testb_rr(reg, reg);
+        else
+            m_assembler.testb_i8r(mask.m_value, reg);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest8(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        // Byte in TrustedImm32 is not well defined, so be a little permisive here, but don't accept nonsense values.
+        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
         if (mask.m_value == -1)
             m_assembler.cmpb_im(0, address.offset, address.base);
         else
@@ -776,9 +922,11 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchTest8(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+    Jump branchTest8(Condition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        // Byte in TrustedImm32 is not well defined, so be a little permisive here, but don't accept nonsense values.
+        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
         if (mask.m_value == -1)
             m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
         else
@@ -820,14 +968,14 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+    Jump branchAdd32(Condition cond, TrustedImm32 imm, RegisterID dest)
    {
         ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
         add32(imm, dest);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchAdd32(Condition cond, Imm32 src, Address dest)
+    Jump branchAdd32(Condition cond, TrustedImm32 src, Address dest)
     {
         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
         add32(src, dest);
@@ -848,6 +996,20 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

+    Jump branchAdd32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            return branchAdd32(cond, src2, dest);
+        move(src2, dest);
+        return branchAdd32(cond, src1, dest);
+    }
+
+    Jump branchAdd32(Condition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+        return branchAdd32(cond, imm, dest);
+    }
+
     Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
     {
         ASSERT(cond == Overflow);
@@ -862,13 +1024,21 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+    Jump branchMul32(Condition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
         ASSERT(cond == Overflow);
         mul32(imm, src, dest);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

+    Jump branchMul32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            return branchMul32(cond, src2, dest);
+        move(src2, dest);
+        return branchMul32(cond, src1, dest);
+    }
+
     Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
     {
         ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -876,14 +1046,14 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+    Jump branchSub32(Condition cond, TrustedImm32 imm, RegisterID dest)
     {
         ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
         sub32(imm, dest);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

-    Jump branchSub32(Condition cond, Imm32 imm, Address dest)
+    Jump branchSub32(Condition cond, TrustedImm32 imm, Address dest)
     {
         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
         sub32(imm, dest);
@@ -904,6 +1074,15 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }

+    Jump branchSub32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        // B := A - B is invalid.
+        ASSERT(src1 == dest || src2 != dest);
+
+        move(src1, dest);
+        return branchSub32(cond, src2, dest);
+    }
+
     Jump branchNeg32(Condition cond, RegisterID srcDest)
     {
         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
@@ -958,7 +1137,7 @@ public:
         m_assembler.setCC_r(x86Condition(cond), dest);
     }

-    void set8Compare32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    void set8Compare32(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
             m_assembler.testl_rr(left, left);
@@ -974,7 +1153,7 @@ public:
         m_assembler.movzbl_rr(dest, dest);
     }

-    void set32Compare32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    void set32Compare32(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
             m_assembler.testl_rr(left, left);
@@ -989,7 +1168,7 @@ public:
     // dest-src, operations always have a dest? ... possibly not true, considering
     // asm ops like test, or pseudo ops like pop().

-    void set32Test8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+    void set32Test8(Condition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
         if (mask.m_value == -1)
             m_assembler.cmpb_im(0, address.offset, address.base);
@@ -999,7 +1178,7 @@ public:
         m_assembler.movzbl_rr(dest, dest);
     }

-    void set32Test32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+    void set32Test32(Condition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
         if (mask.m_value == -1)
             m_assembler.cmpl_im(0, address.offset, address.base);
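Editor's note: the new three-operand integer forms in this merge (and32, or32, xor32, and the branchAdd32/branchMul32/branchSub32 variants) all follow one pattern: lower onto the existing two-operand x86 instructions while remaining correct when dest aliases one of the sources, falling back to move-then-op otherwise. Below is a standalone sketch of that dispatch logic, not JSC code; "registers" are plain ints whose identity is modelled by variable addresses, and names like andRR are illustrative only.

    // Standalone model of the aliasing rules used by the new three-operand
    // and32/or32/xor32 overloads in this change.
    #include <cstdint>
    #include <iostream>

    // Stand-in for the two-operand primitive (e.g. andl_rr): dest &= src.
    static void andRR(const uint32_t& src, uint32_t& dest) { dest &= src; }

    // Mirrors the shape of and32(op1, op2, dest): pick an order of moves and
    // ops that stays correct when dest aliases op1 or op2.
    static void and32(uint32_t& op1, uint32_t& op2, uint32_t& dest)
    {
        if (&op1 == &op2)
            dest = op1;            // zeroExtend32ToPtr(op1, dest) in the real code
        else if (&op1 == &dest)
            andRR(op2, dest);      // dest already holds op1
        else {
            dest = op2;            // move(op2, dest); harmless if dest == op2
            andRR(op1, dest);
        }
    }

    int main()
    {
        uint32_t a = 0xF0F0, b = 0x0FF0, r = 0;
        and32(a, b, r);            // distinct destination
        and32(a, b, b);            // destination aliases op2
        std::cout << std::hex << r << ' ' << b << '\n';  // prints "f0 f0"
    }

The same ordering concern explains the "B := A - B is invalid" asserts on the non-commutative divDouble, subDouble, and branchSub32 three-operand forms: there is no order of two-operand moves and ops that computes them when dest aliases the second source, so the overloads simply assert that case away.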
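The rewritten lshift32/rshift32/urshift32 bodies also merit a gloss: x86 variable-count shifts only take their count in CL, so when the count lives in another register the emitted code exchanges that register with ecx, shifts, and exchanges back; if dest itself is ecx, its value has moved into the old shift_amount register, which is why the shift targets (dest == ecx ? shift_amount : dest). The sketch below is a toy register-file model of that lowering, not JSC code; swapRegs and shlCL stand in for xchgl and shll %cl, and it assumes, as the real code now ASSERTs, that shift_amount != dest.

    // Toy model of the ecx-swap trick in the new lshift32(shift_amount, dest).
    #include <array>
    #include <cstdint>
    #include <iostream>
    #include <utility>

    enum Reg { eax, ecx, edx, ebx, regCount };
    using RegFile = std::array<uint32_t, regCount>;

    static void swapRegs(RegFile& r, Reg a, Reg b) { std::swap(r[a], r[b]); }
    static void shlCL(RegFile& r, Reg dest) { r[dest] <<= (r[ecx] & 31); }

    static void lshift32(RegFile& r, Reg shift_amount, Reg dest)
    {
        if (shift_amount == ecx)
            shlCL(r, dest);
        else {
            // Count is not in ecx: park it there, shift, restore. If dest was
            // ecx, its value now sits in shift_amount, so shift that slot.
            swapRegs(r, shift_amount, ecx);
            shlCL(r, dest == ecx ? shift_amount : dest);
            swapRegs(r, shift_amount, ecx);
        }
    }

    int main()
    {
        RegFile r {};
        r[ecx] = 1;                 // value to shift lives in ecx (the dest)
        r[eax] = 3;                 // shift count lives in eax
        lshift32(r, eax, ecx);
        std::cout << r[ecx] << ' ' << r[eax] << '\n';   // prints "8 3"
    }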