Diffstat (limited to 'JavaScriptCore/assembler')
-rw-r--r--  JavaScriptCore/assembler/ARMAssembler.cpp           |  42
-rw-r--r--  JavaScriptCore/assembler/ARMAssembler.h             |  48
-rw-r--r--  JavaScriptCore/assembler/ARMv7Assembler.h           |   5
-rw-r--r--  JavaScriptCore/assembler/MacroAssembler.h           |  10
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerARM.h        | 133
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerARMv7.h      |  49
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86Common.h  |  77
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86_64.h     |  27
8 files changed, 308 insertions, 83 deletions
diff --git a/JavaScriptCore/assembler/ARMAssembler.cpp b/JavaScriptCore/assembler/ARMAssembler.cpp
index 1324586..81c3222 100644
--- a/JavaScriptCore/assembler/ARMAssembler.cpp
+++ b/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -118,7 +118,7 @@ ARMWord ARMAssembler::getOp2(ARMWord imm)
     if ((imm & 0x00ffffff) == 0)
         return OP2_IMM | (imm >> 24) | (rol << 8);
 
-    return 0;
+    return INVALID_IMM;
 }
 
 int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
@@ -236,25 +236,18 @@ ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
     // Do it by 1 instruction
     tmp = getOp2(imm);
-    if (tmp)
+    if (tmp != INVALID_IMM)
         return tmp;
 
     tmp = getOp2(~imm);
-    if (tmp) {
+    if (tmp != INVALID_IMM) {
         if (invert)
             return tmp | OP2_INV_IMM;
         mvn_r(tmpReg, tmp);
         return tmpReg;
     }
 
-    // Do it by 2 instruction
-    if (genInt(tmpReg, imm, true))
-        return tmpReg;
-    if (genInt(tmpReg, ~imm, false))
-        return tmpReg;
-
-    ldr_imm(tmpReg, imm);
-    return tmpReg;
+    return encodeComplexImm(imm, tmpReg);
 }
 
 void ARMAssembler::moveImm(ARMWord imm, int dest)
@@ -263,24 +256,43 @@ void ARMAssembler::moveImm(ARMWord imm, int dest)
     // Do it by 1 instruction
     tmp = getOp2(imm);
-    if (tmp) {
+    if (tmp != INVALID_IMM) {
         mov_r(dest, tmp);
         return;
     }
 
     tmp = getOp2(~imm);
-    if (tmp) {
+    if (tmp != INVALID_IMM) {
         mvn_r(dest, tmp);
         return;
     }
 
+    encodeComplexImm(imm, dest);
+}
+
+ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
+{
+    ARMWord tmp;
+
+#if ARM_ARCH_VERSION >= 7
+    tmp = getImm16Op2(imm);
+    if (tmp != INVALID_IMM) {
+        movw_r(dest, tmp);
+        return dest;
+    }
+    movw_r(dest, getImm16Op2(imm & 0xffff));
+    movt_r(dest, getImm16Op2(imm >> 16));
+    return dest;
+#else
     // Do it by 2 instruction
     if (genInt(dest, imm, true))
-        return;
+        return dest;
     if (genInt(dest, ~imm, false))
-        return;
+        return dest;
 
     ldr_imm(dest, imm);
+    return dest;
+#endif
 }
 
 // Memory load/store helpers
diff --git a/JavaScriptCore/assembler/ARMAssembler.h b/JavaScriptCore/assembler/ARMAssembler.h
index 9f9a450..712473e 100644
--- a/JavaScriptCore/assembler/ARMAssembler.h
+++ b/JavaScriptCore/assembler/ARMAssembler.h
@@ -121,6 +121,7 @@ namespace JSC {
             MUL = 0x00000090,
             MULL = 0x00c00090,
             FADDD = 0x0e300b00,
+            FDIVD = 0x0e800b00,
             FSUBD = 0x0e300b40,
             FMULD = 0x0e200b00,
             FCMPD = 0x0eb40b40,
@@ -133,12 +134,18 @@ namespace JSC {
             B = 0x0a000000,
             BL = 0x0b000000,
             FMSR = 0x0e000a10,
+            FMRS = 0x0e100a10,
             FSITOD = 0x0eb80bc0,
+            FTOSID = 0x0ebd0b40,
             FMSTAT = 0x0ef1fa10,
 #if ARM_ARCH_VERSION >= 5
             CLZ = 0x016f0f10,
             BKPT = 0xe120070,
 #endif
+#if ARM_ARCH_VERSION >= 7
+            MOVW = 0x03000000,
+            MOVT = 0x03400000,
+#endif
         };
 
         enum {
@@ -175,6 +182,8 @@ namespace JSC {
             padForAlign32 = 0xee120070,
         };
 
+        static const ARMWord INVALID_IMM = 0xf0000000;
+
         class JmpSrc {
             friend class ARMAssembler;
         public:
@@ -333,6 +342,20 @@ namespace JSC {
             emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARMRegisters::r0, op2);
         }
 
+#if ARM_ARCH_VERSION >= 7
+        void movw_r(int rd, ARMWord op2, Condition cc = AL)
+        {
+            ASSERT((op2 | 0xf0fff) == 0xf0fff);
+            m_buffer.putInt(static_cast<ARMWord>(cc) | MOVW | RD(rd) | op2);
+        }
+
+        void movt_r(int rd, ARMWord op2, Condition cc = AL)
+        {
+            ASSERT((op2 | 0xf0fff) == 0xf0fff);
+            m_buffer.putInt(static_cast<ARMWord>(cc) | MOVT | RD(rd) | op2);
+        }
+#endif
+
         void movs_r(int rd, ARMWord op2, Condition cc = AL)
         {
             emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARMRegisters::r0, op2);
@@ -378,6 +401,11 @@
             emitInst(static_cast<ARMWord>(cc) | FADDD, dd, dn, dm);
         }
 
+        void fdivd_r(int dd, int dn, int dm, Condition cc = AL)
+        {
+            emitInst(static_cast<ARMWord>(cc) | FDIVD, dd, dn, dm);
+        }
+
         void fsubd_r(int dd, int dn, int dm, Condition cc = AL)
         {
             emitInst(static_cast<ARMWord>(cc) | FSUBD, dd, dn, dm);
@@ -482,11 +510,21 @@
             emitInst(static_cast<ARMWord>(cc) | FMSR, rn, dd, 0);
         }
 
+        void fmrs_r(int rd, int dn, Condition cc = AL)
+        {
+            emitInst(static_cast<ARMWord>(cc) | FMRS, rd, dn, 0);
+        }
+
         void fsitod_r(int dd, int dm, Condition cc = AL)
         {
             emitInst(static_cast<ARMWord>(cc) | FSITOD, dd, 0, dm);
         }
 
+        void ftosid_r(int fd, int dm, Condition cc = AL)
+        {
+            emitInst(static_cast<ARMWord>(cc) | FTOSID, fd, 0, dm);
+        }
+
         void fmstat(Condition cc = AL)
         {
             m_buffer.putInt(static_cast<ARMWord>(cc) | FMSTAT);
@@ -708,8 +746,18 @@
         }
 
         static ARMWord getOp2(ARMWord imm);
+
+#if ARM_ARCH_VERSION >= 7
+        static ARMWord getImm16Op2(ARMWord imm)
+        {
+            if (imm <= 0xffff)
+                return (imm & 0xf000) << 4 | (imm & 0xfff);
+            return INVALID_IMM;
+        }
+#endif
         ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
         void moveImm(ARMWord imm, int dest);
+        ARMWord encodeComplexImm(ARMWord imm, int dest);
 
         // Memory load/store helpers
diff --git a/JavaScriptCore/assembler/ARMv7Assembler.h b/JavaScriptCore/assembler/ARMv7Assembler.h
index 02ce2e9..e253a53 100644
--- a/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -236,6 +236,11 @@ class ARMThumbImmediate {
     ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
         : m_type(TypeUInt16)
     {
+        // Make sure this constructor is only reached with type TypeUInt16;
+        // this extra parameter makes the code a little clearer by making it
+        // explicit at call sites which type is being constructed
+        ASSERT_UNUSED(type, type == TypeUInt16);
+
         m_value.asInt = value;
     }
 
diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h
index 2743ab4..858707d 100644
--- a/JavaScriptCore/assembler/MacroAssembler.h
+++ b/JavaScriptCore/assembler/MacroAssembler.h
@@ -179,16 +179,6 @@ public:
         or32(imm, dest);
     }
 
-    void rshiftPtr(RegisterID shift_amount, RegisterID dest)
-    {
-        rshift32(shift_amount, dest);
-    }
-
-    void rshiftPtr(Imm32 imm, RegisterID dest)
-    {
-        rshift32(imm, dest);
-    }
-
     void subPtr(RegisterID src, RegisterID dest)
     {
         sub32(src, dest);
diff --git a/JavaScriptCore/assembler/MacroAssemblerARM.h b/JavaScriptCore/assembler/MacroAssemblerARM.h
index 7a72b06..24e2e11 100644
--- a/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -38,6 +38,9 @@ namespace JSC {
 
 class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+    static const int DoubleConditionMask = 0x0f;
+    static const int DoubleConditionBitSpecial = 0x10;
+    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
 public:
     enum Condition {
         Equal = ARMAssembler::EQ,
@@ -57,11 +60,20 @@ public:
     };
 
     enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
         DoubleEqual = ARMAssembler::EQ,
+        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
         DoubleGreaterThan = ARMAssembler::GT,
         DoubleGreaterThanOrEqual = ARMAssembler::GE,
-        DoubleLessThan = ARMAssembler::LT,
-        DoubleLessThanOrEqual = ARMAssembler::LE,
+        DoubleLessThan = ARMAssembler::CC,
+        DoubleLessThanOrEqual = ARMAssembler::LS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
+        DoubleNotEqualOrUnordered = ARMAssembler::NE,
+        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
+        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
+        DoubleLessThanOrUnordered = ARMAssembler::LT,
+        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
     };
 
     static const RegisterID stackPointerRegister = ARMRegisters::sp;
@@ -106,14 +118,18 @@ public:
         m_assembler.ands_r(dest, dest, w);
     }
 
-    void lshift32(Imm32 imm, RegisterID dest)
+    void lshift32(RegisterID shift_amount, RegisterID dest)
     {
-        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+        ARMWord w = ARMAssembler::getOp2(0x1f);
+        ASSERT(w != ARMAssembler::INVALID_IMM);
+        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
+
+        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
     }
 
-    void lshift32(RegisterID shift_amount, RegisterID dest)
+    void lshift32(Imm32 imm, RegisterID dest)
     {
-        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, shift_amount));
+        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
     }
 
     void mul32(RegisterID src, RegisterID dest)
@@ -131,6 +147,11 @@ public:
         m_assembler.muls_r(dest, src, ARMRegisters::S0);
     }
 
+    void neg32(RegisterID srcDest)
+    {
+        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
+    }
+
     void not32(RegisterID dest)
     {
         m_assembler.mvns_r(dest, dest);
@@ -148,7 +169,11 @@ public:
 
     void rshift32(RegisterID shift_amount, RegisterID dest)
     {
-        m_assembler.movs_r(dest, m_assembler.asr_r(dest, shift_amount));
+        ARMWord w = ARMAssembler::getOp2(0x1f);
+        ASSERT(w != ARMAssembler::INVALID_IMM);
+        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
+
+        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
     }
 
     void rshift32(Imm32 imm, RegisterID dest)
@@ -505,6 +530,13 @@ public:
         return Jump(m_assembler.jmp(ARMCondition(cond)));
     }
 
+    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+        or32(src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
     void breakpoint()
     {
         m_assembler.bkpt(0);
@@ -548,6 +580,25 @@ public:
         m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
     }
 
+    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        // ARM doesn't have byte registers
+        set32(cond, left, right, dest);
+    }
+
+    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
+    {
+        // ARM doesn't have byte registers
+        load32(left, ARMRegisters::S1);
+        set32(cond, ARMRegisters::S1, right, dest);
+    }
+
+    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    {
+        // ARM doesn't have byte registers
+        set32(cond, left, right, dest);
+    }
+
     void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
     {
         load32(address, ARMRegisters::S1);
@@ -559,6 +610,12 @@ public:
         m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
     }
 
+    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+    {
+        // ARM doesn't have byte registers
+        setTest32(cond, address, mask, dest);
+    }
+
     void add32(Imm32 imm, RegisterID src, RegisterID dest)
     {
         m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
@@ -666,6 +723,12 @@ public:
         m_assembler.doubleTransfer(true, dest, address.base, address.offset);
     }
 
+    void loadDouble(void* address, FPRegisterID dest)
+    {
+        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
+        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
+    }
+
     void storeDouble(FPRegisterID src, ImplicitAddress address)
     {
         m_assembler.doubleTransfer(false, src, address.base, address.offset);
@@ -682,6 +745,18 @@ public:
         addDouble(ARMRegisters::SD0, dest);
     }
 
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fdivd_r(dest, dest, src);
+    }
+
+    void divDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT_NOT_REACHED(); // Untested
+        loadDouble(src, ARMRegisters::SD0);
+        divDouble(ARMRegisters::SD0, dest);
+    }
+
     void subDouble(FPRegisterID src, FPRegisterID dest)
     {
         m_assembler.fsubd_r(dest, dest, src);
@@ -710,11 +785,30 @@ public:
         m_assembler.fsitod_r(dest, dest);
     }
 
+    void convertInt32ToDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT_NOT_REACHED(); // Untested
+        // flds does not worth the effort here
+        load32(src, ARMRegisters::S1);
+        convertInt32ToDouble(ARMRegisters::S1, dest);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+    {
+        ASSERT_NOT_REACHED(); // Untested
+        // flds does not worth the effort here
+        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
+        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
+        convertInt32ToDouble(ARMRegisters::S1, dest);
+    }
+
     Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
     {
         m_assembler.fcmpd_r(left, right);
         m_assembler.fmstat();
-        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond)));
+        if (cond & DoubleConditionBitSpecial)
+            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
+        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
     }
 
     // Truncates 'src' to an integer, and places the resulting 'dest'.
@@ -729,6 +823,29 @@ public:
         return jump();
     }
 
+    // Convert 'src' to an integer, and places the resulting 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+    {
+        m_assembler.ftosid_r(ARMRegisters::SD0, src);
+        m_assembler.fmrs_r(dest, ARMRegisters::SD0);
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
+
+        // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
+        failureCases.append(branchTest32(Zero, dest));
+    }
+
+    void zeroDouble(FPRegisterID srcDest)
+    {
+        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
+        convertInt32ToDouble(ARMRegisters::S0, srcDest);
+    }
+
 protected:
     ARMAssembler::Condition ARMCondition(Condition cond)
     {
diff --git a/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index c479517..532a9cf 100644
--- a/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -93,13 +93,21 @@ public:
         Zero = ARMv7Assembler::ConditionEQ,
         NonZero = ARMv7Assembler::ConditionNE
     };
-
     enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
         DoubleEqual = ARMv7Assembler::ConditionEQ,
+        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
         DoubleGreaterThan = ARMv7Assembler::ConditionGT,
         DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
         DoubleLessThan = ARMv7Assembler::ConditionLO,
         DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
+        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
+        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
+        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
+        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
     };
 
     static const RegisterID stackPointerRegister = ARMRegisters::sp;
@@ -189,14 +197,19 @@ public:
         }
     }
 
-    void lshift32(Imm32 imm, RegisterID dest)
+    void lshift32(RegisterID shift_amount, RegisterID dest)
     {
-        m_assembler.lsl(dest, dest, imm.m_value);
+        // Clamp the shift to the range 0..31
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+        ASSERT(armImm.isValid());
+        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+
+        m_assembler.lsl(dest, dest, dataTempRegister);
     }
 
-    void lshift32(RegisterID shift_amount, RegisterID dest)
+    void lshift32(Imm32 imm, RegisterID dest)
     {
-        m_assembler.lsl(dest, dest, shift_amount);
+        m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
     }
 
     void mul32(RegisterID src, RegisterID dest)
@@ -233,12 +246,17 @@ public:
 
     void rshift32(RegisterID shift_amount, RegisterID dest)
     {
-        m_assembler.asr(dest, dest, shift_amount);
+        // Clamp the shift to the range 0..31
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+        ASSERT(armImm.isValid());
+        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+
+        m_assembler.asr(dest, dest, dataTempRegister);
     }
 
     void rshift32(Imm32 imm, RegisterID dest)
     {
-        m_assembler.asr(dest, dest, imm.m_value);
+        m_assembler.asr(dest, dest, imm.m_value & 0x1f);
     }
 
     void sub32(RegisterID src, RegisterID dest)
@@ -531,6 +549,23 @@
     {
         m_assembler.vcmp_F64(left, right);
         m_assembler.vmrs_APSR_nzcv_FPSCR();
+
+        if (cond == DoubleNotEqual) {
+            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+            unordered.link(this);
+            return result;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+            unordered.link(this);
+            // We get here if either unordered, or equal.
+            Jump result = makeJump();
+            notEqual.link(this);
+            return result;
+        }
         return makeBranch(cond);
     }
 
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index 5ebefa7..96d21f1 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -36,6 +36,10 @@ namespace JSC {
 
 class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+    static const int DoubleConditionBitInvert = 0x10;
+    static const int DoubleConditionBitSpecial = 0x20;
+    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
 public:
 
     enum Condition {
@@ -56,13 +60,24 @@ public:
     };
 
     enum DoubleCondition {
-        DoubleEqual = X86Assembler::ConditionE,
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
         DoubleNotEqual = X86Assembler::ConditionNE,
         DoubleGreaterThan = X86Assembler::ConditionA,
         DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
-        DoubleLessThan = X86Assembler::ConditionB,
-        DoubleLessThanOrEqual = X86Assembler::ConditionBE,
+        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = X86Assembler::ConditionE,
+        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
     };
+    COMPILE_ASSERT(
+        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
 
     static const RegisterID stackPointerRegister = X86Registers::esp;
@@ -416,20 +431,35 @@ public:
     void convertInt32ToDouble(Address src, FPRegisterID dest)
     {
+        ASSERT(isSSE2Present());
         m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
     }
 
     Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
     {
         ASSERT(isSSE2Present());
-        m_assembler.ucomisd_rr(right, left);
-        return Jump(m_assembler.jCC(x86Condition(cond)));
-    }
 
-    Jump branchDouble(DoubleCondition cond, FPRegisterID left, Address right)
-    {
-        m_assembler.ucomisd_mr(right.offset, right.base, left);
-        return Jump(m_assembler.jCC(x86Condition(cond)));
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+
+        if (cond == DoubleEqual) {
+            Jump isUnordered(m_assembler.jp());
+            Jump result = Jump(m_assembler.je());
+            isUnordered.link(this);
+            return result;
+        } else if (cond == DoubleNotEqualOrUnordered) {
+            Jump isUnordered(m_assembler.jp());
+            Jump isEqual(m_assembler.je());
+            isUnordered.link(this);
+            Jump result = jump();
+            isEqual.link(this);
+            return result;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
     }
 
     // Truncates 'src' to an integer, and places the resulting 'dest'.
@@ -443,6 +473,25 @@ public:
         return branch32(Equal, dest, Imm32(0x80000000));
     }
 
+    // Convert 'src' to an integer, and places the resulting 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2si_rr(src, dest);
+
+        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+        failureCases.append(branchTest32(Zero, dest));
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        convertInt32ToDouble(dest, fpTemp);
+        m_assembler.ucomisd_rr(fpTemp, src);
+        failureCases.append(m_assembler.jp());
+        failureCases.append(m_assembler.jne());
+    }
+
     void zeroDouble(FPRegisterID srcDest)
     {
         ASSERT(isSSE2Present());
@@ -509,7 +558,8 @@ public:
 
     void swap(RegisterID reg1, RegisterID reg2)
    {
-        m_assembler.xchgq_rr(reg1, reg2);
+        if (reg1 != reg2)
+            m_assembler.xchgq_rr(reg1, reg2);
     }
 
     void signExtend32ToPtr(RegisterID src, RegisterID dest)
@@ -889,11 +939,6 @@ protected:
         return static_cast<X86Assembler::Condition>(cond);
     }
 
-    X86Assembler::Condition x86Condition(DoubleCondition cond)
-    {
-        return static_cast<X86Assembler::Condition>(cond);
-    }
-
 private:
     // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
     // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 0f95fe6..7828cf5 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -192,33 +192,6 @@ public:
         m_assembler.orq_ir(imm.m_value, dest);
     }
 
-    void rshiftPtr(RegisterID shift_amount, RegisterID dest)
-    {
-        // On x86 we can only shift by ecx; if asked to shift by another register we'll
-        // need rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86Registers::ecx) {
-            swap(shift_amount, X86Registers::ecx);
-
-            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
-            if (dest == shift_amount)
-                m_assembler.sarq_CLr(X86Registers::ecx);
-            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86Registers::ecx)
-                m_assembler.sarq_CLr(shift_amount);
-            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
-            else
-                m_assembler.sarq_CLr(dest);
-
-            swap(shift_amount, X86Registers::ecx);
-        } else
-            m_assembler.sarq_CLr(dest);
-    }
-
-    void rshiftPtr(Imm32 imm, RegisterID dest)
-    {
-        m_assembler.sarq_i8r(imm.m_value, dest);
-    }
-
     void subPtr(RegisterID src, RegisterID dest)
     {
         m_assembler.subq_rr(src, dest);
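
The DoubleCondition changes above split every floating-point condition into an "ordered" form and an "OrUnordered" form, because a hardware compare against NaN is unordered and the raw condition codes alone cannot express both behaviours. The following standalone C++ sketch is not JIT code; the helper names are invented for illustration and simply spell out the predicate each pair of enum values is meant to implement:

#include <cmath>
#include <cstdio>

// Hypothetical helpers, named after the DoubleCondition values for clarity.
static bool doubleEqual(double a, double b)               { return a == b; }   // false when either side is NaN
static bool doubleNotEqual(double a, double b)            { return !std::isnan(a) && !std::isnan(b) && a != b; }
static bool doubleEqualOrUnordered(double a, double b)    { return std::isnan(a) || std::isnan(b) || a == b; }
static bool doubleNotEqualOrUnordered(double a, double b) { return a != b; }   // true when either side is NaN

int main()
{
    double nan = std::nan("");
    // Prints 0 0 1 1: the ordered forms reject NaN, the OrUnordered forms accept it.
    std::printf("%d %d %d %d\n",
                doubleEqual(nan, 1.0), doubleNotEqual(nan, 1.0),
                doubleEqualOrUnordered(nan, 1.0), doubleNotEqualOrUnordered(nan, 1.0));
    return 0;
}

This is also why the x86 branchDouble above checks the parity flag (jp) after ucomisd, which sets PF on an unordered compare, and why DoubleLessThan is encoded as an operand-swapped ConditionA via DoubleConditionBitInvert.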
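branchConvertDoubleToInt32, added for both the ARM and x86 ports above, uses a round-trip check rather than inspecting the bit pattern of the double. Below is a minimal host-side C++ sketch of the same algorithm, written as an ordinary function rather than emitted code; the range guard at the top is an assumption of this sketch (the JIT instead lets the truncating instruction produce a sentinel and catches it with the compare that follows):

#include <cstdint>

// Returns false on the same inputs the JIT treats as failure cases:
// NaN, out-of-range values, fractional values, and 0 (which may be -0.0).
static bool convertDoubleToInt32(double d, int32_t& out)
{
    // Host-side guard so the cast below is well defined; NaN also fails here.
    if (!(d >= INT32_MIN && d <= INT32_MAX))
        return false;

    int32_t truncated = static_cast<int32_t>(d);   // plays the role of cvttsd2si / ftosid

    // 0 is rejected because -0.0 truncates to 0 and compares equal to 0.0,
    // so the round-trip test alone cannot tell the two apart.
    if (truncated == 0)
        return false;

    // Round-trip: convert back and compare, exactly as the emitted code does.
    if (static_cast<double>(truncated) != d)
        return false;

    out = truncated;
    return true;
}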
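The INVALID_IMM sentinel and encodeComplexImm exist because only some 32-bit constants fit an ARM data-processing operand (an 8-bit value rotated right by an even amount); on ARMv7 the fallback is a MOVW/MOVT pair that loads the two 16-bit halves. A host-side sketch of that decision follows, assuming the standard operand-2 encodability rule; the helper names are illustrative, not ARMAssembler's API:

#include <cstdint>

// True if imm can be expressed as an 8-bit constant rotated right by an even amount.
static bool fitsOperand2(uint32_t imm)
{
    for (unsigned r = 0; r < 32; r += 2) {
        // Rotating left by r undoes a right-rotation by r in the encoding.
        uint32_t rotated = r ? (imm << r) | (imm >> (32 - r)) : imm;
        if (rotated <= 0xff)
            return true;
    }
    return false;
}

struct ConstantPlan {
    bool single;         // one MOV with an operand-2 immediate
    uint16_t low, high;  // otherwise MOVW #low followed by MOVT #high
};

static ConstantPlan planConstant(uint32_t imm)
{
    if (fitsOperand2(imm))
        return { true, 0, 0 };
    return { false, static_cast<uint16_t>(imm & 0xffff), static_cast<uint16_t>(imm >> 16) };
}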
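Both ARM ports now mask a register shift amount with 0x1f before shifting. That matches what the x86 backend gets for free, since 32-bit x86 shifts only use the low five bits of the count, and what JavaScript's shift operators specify, whereas ARM's register-specified shifts honour larger counts. A small C++ illustration of the intended semantics (assumption of this sketch: callers depend on shift amounts of 32 or more wrapping modulo 32):

#include <cstdint>

static int32_t shiftLeft32(int32_t value, uint32_t amount)
{
    // Mask to 0..31, then shift as unsigned to avoid signed-overflow issues.
    return static_cast<int32_t>(static_cast<uint32_t>(value) << (amount & 0x1f));
}

static int32_t shiftRightArithmetic32(int32_t value, uint32_t amount)
{
    // Relies on arithmetic right shift of negative values, as the emitted code does.
    return value >> (amount & 0x1f);
}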