| Field | Value | Date |
|---|---|---|
| author | Steve Block <steveblock@google.com> | 2009-10-08 17:19:54 +0100 |
| committer | Steve Block <steveblock@google.com> | 2009-10-20 00:41:58 +0100 |
| commit | 231d4e3152a9c27a73b6ac7badbe6be673aa3ddf (patch) | |
| tree | a6c7e2d6cd7bfa7011cc39abbb436142d7a4a7c8 /JavaScriptCore/assembler | |
| parent | e196732677050bd463301566a68a643b6d14b907 (diff) | |
Merge webkit.org at R49305 : Automatic merge by git.
Change-Id: I8968561bc1bfd72b8923b7118d3728579c6dbcc7
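The bulk of this merge renames JavaScriptCore's ARM register namespace from `ARM` to `ARMRegisters`, switches the platform guards to `PLATFORM(ARM_TRADITIONAL)` and `PLATFORM(ARM_THUMB2)`, and reworks how traditional-ARM branches are linked: instead of an `m_latePatch` bitfield on `JmpSrc`, `jmp()` now tags the low bit of the recorded (word-aligned) buffer offset to mark branches that must be patched through the constant pool, and `executableCopy()` strips that bit back off before linking. A minimal sketch of that low-bit tagging idea follows; the class and names are hypothetical, not the ones from the diff.

```cpp
#include <cassert>
#include <vector>

// Jump records are word-aligned buffer offsets, so bit 0 is free to carry a
// flag: "this branch must be linked through the constant pool".
class JumpRecords {
public:
    void append(int offset, bool useConstantPool)
    {
        assert((offset & 0x3) == 0); // instruction offsets are 4-byte aligned
        m_jumps.push_back(offset | (useConstantPool ? 0x1 : 0x0));
    }

    template <typename Linker>
    void linkAll(const Linker& link) const
    {
        for (int record : m_jumps) {
            int offset = record & ~0x1;          // strip the flag to recover the offset
            bool useConstantPool = record & 0x1; // recover the flag itself
            link(offset, useConstantPool);
        }
    }

private:
    std::vector<int> m_jumps;
};
```

The trick costs nothing at link time and avoids widening `JmpSrc`, which the old `int m_offset : 31; int m_latePatch : 1;` layout had squeezed into 32 bits.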
Diffstat (limited to 'JavaScriptCore/assembler')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | JavaScriptCore/assembler/ARMAssembler.cpp | 65 |
| -rw-r--r-- | JavaScriptCore/assembler/ARMAssembler.h | 156 |
| -rw-r--r-- | JavaScriptCore/assembler/ARMv7Assembler.h | 119 |
| -rw-r--r-- | JavaScriptCore/assembler/AbstractMacroAssembler.h | 10 |
| -rw-r--r-- | JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h | 25 |
| -rw-r--r-- | JavaScriptCore/assembler/MacroAssembler.h | 4 |
| -rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerARM.cpp | 94 |
| -rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerARM.h | 287 |
| -rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerARMv7.h | 29 |
| -rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerCodeRef.h | 6 |
| -rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerX86Common.h | 37 |
| -rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerX86_64.h | 58 |
| -rw-r--r-- | JavaScriptCore/assembler/X86Assembler.h | 29 |
13 files changed, 525 insertions, 394 deletions
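Among the files listed above, MacroAssemblerARM.cpp is new: it replaces the previous hard-coded `supportsFloatingPoint() { return false; }` with a cached runtime check for VFP hardware. The sketch below is a simplified, standalone reading of that `isVFPPresent()` helper — it assumes an ARM Linux target where `/proc/self/auxv`, `Elf32_auxv_t`, and `HWCAP_VFP` are available, and it is not a drop-in replacement for the code in the diff.

```cpp
#include <elf.h>        // Elf32_auxv_t, AT_HWCAP
#include <fcntl.h>      // open, O_RDONLY
#include <unistd.h>     // read, close
#include <asm/hwcap.h>  // HWCAP_VFP (ARM Linux only)

// Scan the ELF auxiliary vector exposed by the kernel; the AT_HWCAP entry
// carries a bitmask of hardware capabilities, including VFP support.
static bool isVFPPresent()
{
    bool hasVFP = false;
    int fd = open("/proc/self/auxv", O_RDONLY);
    if (fd >= 0) {
        Elf32_auxv_t aux;
        while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
            if (aux.a_type == AT_HWCAP) {
                hasVFP = aux.a_un.a_val & HWCAP_VFP;
                break;
            }
        }
        close(fd);
    }
    return hasVFP;
}
```

Caching the result once, as the diff does with `static const bool s_isVFPPresent`, keeps the probe off the JIT's hot path.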
diff --git a/JavaScriptCore/assembler/ARMAssembler.cpp b/JavaScriptCore/assembler/ARMAssembler.cpp index 69daa16..1324586 100644 --- a/JavaScriptCore/assembler/ARMAssembler.cpp +++ b/JavaScriptCore/assembler/ARMAssembler.cpp @@ -26,7 +26,7 @@ #include "config.h" -#if ENABLE(ASSEMBLER) && PLATFORM(ARM) +#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL) #include "ARMAssembler.h" @@ -49,11 +49,11 @@ ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool) return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK)); } -void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to) +void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to, int useConstantPool) { ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord)); - if (!from.m_latePatch) { + if (!useConstantPool) { int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2); if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) { @@ -291,10 +291,10 @@ void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID bas if (offset <= 0xfff) dtr_u(isLoad, srcDst, base, offset); else if (offset <= 0xfffff) { - add_r(ARM::S0, base, OP2_IMM | (offset >> 12) | (10 << 8)); - dtr_u(isLoad, srcDst, ARM::S0, offset & 0xfff); + add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8)); + dtr_u(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff); } else { - ARMWord reg = getImm(offset, ARM::S0); + ARMWord reg = getImm(offset, ARMRegisters::S0); dtr_ur(isLoad, srcDst, base, reg); } } else { @@ -302,10 +302,10 @@ void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID bas if (offset <= 0xfff) dtr_d(isLoad, srcDst, base, offset); else if (offset <= 0xfffff) { - sub_r(ARM::S0, base, OP2_IMM | (offset >> 12) | (10 << 8)); - dtr_d(isLoad, srcDst, ARM::S0, offset & 0xfff); + sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8)); + dtr_d(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff); } else { - ARMWord reg = getImm(offset, ARM::S0); + ARMWord reg = getImm(offset, ARMRegisters::S0); dtr_dr(isLoad, srcDst, base, reg); } } @@ -319,19 +319,19 @@ void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterI op2 = lsl(index, scale); if (offset >= 0 && offset <= 0xfff) { - add_r(ARM::S0, base, op2); - dtr_u(isLoad, srcDst, ARM::S0, offset); + add_r(ARMRegisters::S0, base, op2); + dtr_u(isLoad, srcDst, ARMRegisters::S0, offset); return; } if (offset <= 0 && offset >= -0xfff) { - add_r(ARM::S0, base, op2); - dtr_d(isLoad, srcDst, ARM::S0, -offset); + add_r(ARMRegisters::S0, base, op2); + dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset); return; } - ldr_un_imm(ARM::S0, offset); - add_r(ARM::S0, ARM::S0, op2); - dtr_ur(isLoad, srcDst, base, ARM::S0); + ldr_un_imm(ARMRegisters::S0, offset); + add_r(ARMRegisters::S0, ARMRegisters::S0, op2); + dtr_ur(isLoad, srcDst, base, ARMRegisters::S0); } void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset) @@ -342,8 +342,8 @@ void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID b return; } if (offset <= 0x3ffff && offset >= 0) { - add_r(ARM::S0, base, OP2_IMM | (offset >> 10) | (11 << 8)); - fdtr_u(isLoad, srcDst, ARM::S0, (offset >> 2) & 0xff); + add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8)); + fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff); return; } offset = -offset; @@ -353,27 +353,36 @@ void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, 
RegisterID b return; } if (offset <= 0x3ffff && offset >= 0) { - sub_r(ARM::S0, base, OP2_IMM | (offset >> 10) | (11 << 8)); - fdtr_d(isLoad, srcDst, ARM::S0, (offset >> 2) & 0xff); + sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8)); + fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff); return; } offset = -offset; } - ldr_un_imm(ARM::S0, offset); - add_r(ARM::S0, ARM::S0, base); - fdtr_u(isLoad, srcDst, ARM::S0, 0); + ldr_un_imm(ARMRegisters::S0, offset); + add_r(ARMRegisters::S0, ARMRegisters::S0, base); + fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0); } void* ARMAssembler::executableCopy(ExecutablePool* allocator) { + // 64-bit alignment is required for next constant pool and JIT code as well + m_buffer.flushWithoutBarrier(true); + if (m_buffer.uncheckedSize() & 0x7) + bkpt(0); + char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator)); for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) { - ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + *iter); - ARMWord* offset = getLdrImmAddress(ldrAddr); - if (*offset != 0xffffffff) - linkBranch(data, JmpSrc(*iter), data + *offset); + // The last bit is set if the constant must be placed on constant pool. + int pos = (*iter) & (~0x1); + ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos); + ARMWord offset = *getLdrImmAddress(ldrAddr); + if (offset != 0xffffffff) { + JmpSrc jmpSrc(pos); + linkBranch(data, jmpSrc, data + offset, ((*iter) & 1)); + } } return data; @@ -381,4 +390,4 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator) } // namespace JSC -#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM) +#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL) diff --git a/JavaScriptCore/assembler/ARMAssembler.h b/JavaScriptCore/assembler/ARMAssembler.h index d3fe782..9f9a450 100644 --- a/JavaScriptCore/assembler/ARMAssembler.h +++ b/JavaScriptCore/assembler/ARMAssembler.h @@ -29,55 +29,55 @@ #include <wtf/Platform.h> -#if ENABLE(ASSEMBLER) && PLATFORM(ARM) +#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL) #include "AssemblerBufferWithConstantPool.h" #include <wtf/Assertions.h> namespace JSC { -typedef uint32_t ARMWord; - -namespace ARM { - typedef enum { - r0 = 0, - r1, - r2, - r3, - S0 = r3, - r4, - r5, - r6, - r7, - r8, - S1 = r8, - r9, - r10, - r11, - r12, - r13, - sp = r13, - r14, - lr = r14, - r15, - pc = r15 - } RegisterID; - - typedef enum { - d0, - d1, - d2, - d3, - SD0 = d3 - } FPRegisterID; - -} // namespace ARM + typedef uint32_t ARMWord; + + namespace ARMRegisters { + typedef enum { + r0 = 0, + r1, + r2, + r3, + S0 = r3, + r4, + r5, + r6, + r7, + r8, + S1 = r8, + r9, + r10, + r11, + r12, + r13, + sp = r13, + r14, + lr = r14, + r15, + pc = r15 + } RegisterID; + + typedef enum { + d0, + d1, + d2, + d3, + SD0 = d3 + } FPRegisterID; + + } // namespace ARMRegisters class ARMAssembler { public: - typedef ARM::RegisterID RegisterID; - typedef ARM::FPRegisterID FPRegisterID; + typedef ARMRegisters::RegisterID RegisterID; + typedef ARMRegisters::FPRegisterID FPRegisterID; typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer; - typedef WTF::SegmentedVector<int, 64> Jumps; + typedef SegmentedVector<int, 64> Jumps; ARMAssembler() { } @@ -180,20 +180,16 @@ namespace ARM { public: JmpSrc() : m_offset(-1) - , m_latePatch(false) { } - void enableLatePatch() { m_latePatch = true; } private: JmpSrc(int offset) : m_offset(offset) - , m_latePatch(false) { } - int m_offset : 31; - int m_latePatch : 1; + int m_offset; }; class JmpDst { @@ -334,12 
+330,12 @@ namespace ARM { void mov_r(int rd, ARMWord op2, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARM::r0, op2); + emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARMRegisters::r0, op2); } void movs_r(int rd, ARMWord op2, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARM::r0, op2); + emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARMRegisters::r0, op2); } void bic_r(int rd, int rn, ARMWord op2, Condition cc = AL) @@ -354,12 +350,12 @@ namespace ARM { void mvn_r(int rd, ARMWord op2, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | MVN, rd, ARM::r0, op2); + emitInst(static_cast<ARMWord>(cc) | MVN, rd, ARMRegisters::r0, op2); } void mvns_r(int rd, ARMWord op2, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | MVN | SET_CC, rd, ARM::r0, op2); + emitInst(static_cast<ARMWord>(cc) | MVN | SET_CC, rd, ARMRegisters::r0, op2); } void mul_r(int rd, int rn, int rm, Condition cc = AL) @@ -399,12 +395,12 @@ namespace ARM { void ldr_imm(int rd, ARMWord imm, Condition cc = AL) { - m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARM::pc) | RD(rd), imm, true); + m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm, true); } void ldr_un_imm(int rd, ARMWord imm, Condition cc = AL) { - m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARM::pc) | RD(rd), imm); + m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm); } void dtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL) @@ -462,23 +458,23 @@ namespace ARM { void push_r(int reg, Condition cc = AL) { ASSERT(ARMWord(reg) <= 0xf); - m_buffer.putInt(cc | DTR | DT_WB | RN(ARM::sp) | RD(reg) | 0x4); + m_buffer.putInt(cc | DTR | DT_WB | RN(ARMRegisters::sp) | RD(reg) | 0x4); } void pop_r(int reg, Condition cc = AL) { ASSERT(ARMWord(reg) <= 0xf); - m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARM::sp) | RD(reg) | 0x4); + m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARMRegisters::sp) | RD(reg) | 0x4); } inline void poke_r(int reg, Condition cc = AL) { - dtr_d(false, ARM::sp, 0, reg, cc); + dtr_d(false, ARMRegisters::sp, 0, reg, cc); } inline void peek_r(int reg, Condition cc = AL) { - dtr_u(true, reg, ARM::sp, 0, cc); + dtr_u(true, reg, ARMRegisters::sp, 0, cc); } void fmsr_r(int dd, int rn, Condition cc = AL) @@ -509,49 +505,49 @@ namespace ARM { m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf)); #else // Cannot access to Zero memory address - dtr_dr(true, ARM::S0, ARM::S0, ARM::S0); + dtr_dr(true, ARMRegisters::S0, ARMRegisters::S0, ARMRegisters::S0); #endif } static ARMWord lsl(int reg, ARMWord value) { - ASSERT(reg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); ASSERT(value <= 0x1f); return reg | (value << 7) | 0x00; } static ARMWord lsr(int reg, ARMWord value) { - ASSERT(reg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); ASSERT(value <= 0x1f); return reg | (value << 7) | 0x20; } static ARMWord asr(int reg, ARMWord value) { - ASSERT(reg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); ASSERT(value <= 0x1f); return reg | (value << 7) | 0x40; } static ARMWord lsl_r(int reg, int shiftReg) { - ASSERT(reg <= ARM::pc); - ASSERT(shiftReg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); + ASSERT(shiftReg <= ARMRegisters::pc); return reg | (shiftReg << 8) | 0x10; } static ARMWord lsr_r(int reg, int shiftReg) { - ASSERT(reg <= ARM::pc); - 
ASSERT(shiftReg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); + ASSERT(shiftReg <= ARMRegisters::pc); return reg | (shiftReg << 8) | 0x30; } static ARMWord asr_r(int reg, int shiftReg) { - ASSERT(reg <= ARM::pc); - ASSERT(shiftReg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); + ASSERT(shiftReg <= ARMRegisters::pc); return reg | (shiftReg << 8) | 0x50; } @@ -567,6 +563,11 @@ namespace ARM { m_buffer.ensureSpace(insnSpace, constSpace); } + int sizeOfConstantPool() + { + return m_buffer.sizeOfConstantPool(); + } + JmpDst label() { return JmpDst(m_buffer.size()); @@ -575,16 +576,17 @@ namespace ARM { JmpDst align(int alignment) { while (!m_buffer.isAligned(alignment)) - mov_r(ARM::r0, ARM::r0); + mov_r(ARMRegisters::r0, ARMRegisters::r0); return label(); } - JmpSrc jmp(Condition cc = AL) + JmpSrc jmp(Condition cc = AL, int useConstantPool = 0) { - int s = size(); - ldr_un_imm(ARM::pc, 0xffffffff, cc); - m_jumps.append(s); + ensureSpace(sizeof(ARMWord), sizeof(ARMWord)); + int s = m_buffer.uncheckedSize(); + ldr_un_imm(ARMRegisters::pc, 0xffffffff, cc); + m_jumps.append(s | (useConstantPool & 0x1)); return JmpSrc(s); } @@ -593,7 +595,7 @@ namespace ARM { // Patching helpers static ARMWord* getLdrImmAddress(ARMWord* insn, uint32_t* constPool = 0); - static void linkBranch(void* code, JmpSrc from, void* to); + static void linkBranch(void* code, JmpSrc from, void* to, int useConstantPool = 0); static void patchPointerInternal(intptr_t from, void* to) { @@ -660,7 +662,7 @@ namespace ARM { static void linkCall(void* code, JmpSrc from, void* to) { - linkBranch(code, from, to); + linkBranch(code, from, to, true); } static void relinkCall(void* from, void* to) @@ -727,25 +729,25 @@ namespace ARM { private: ARMWord RM(int reg) { - ASSERT(reg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); return reg; } ARMWord RS(int reg) { - ASSERT(reg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); return reg << 8; } ARMWord RD(int reg) { - ASSERT(reg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); return reg << 12; } ARMWord RN(int reg) { - ASSERT(reg <= ARM::pc); + ASSERT(reg <= ARMRegisters::pc); return reg << 16; } @@ -762,6 +764,6 @@ namespace ARM { } // namespace JSC -#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM) +#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL) #endif // ARMAssembler_h diff --git a/JavaScriptCore/assembler/ARMv7Assembler.h b/JavaScriptCore/assembler/ARMv7Assembler.h index f7e2fb4..078de44 100644 --- a/JavaScriptCore/assembler/ARMv7Assembler.h +++ b/JavaScriptCore/assembler/ARMv7Assembler.h @@ -28,7 +28,7 @@ #include <wtf/Platform.h> -#if ENABLE(ASSEMBLER) && PLATFORM_ARM_ARCH(7) +#if ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2) #include "AssemblerBuffer.h" #include <wtf/Assertions.h> @@ -37,7 +37,7 @@ namespace JSC { -namespace ARM { +namespace ARMRegisters { typedef enum { r0, r1, @@ -199,7 +199,7 @@ class ARMThumbImmediate { }; } PatternBytes; - ALWAYS_INLINE static int32_t countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N) + ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N) { if (value & ~((1<<N)-1)) /* check for any of the top N bits (of 2N bits) are set */ \ value >>= N; /* if any were set, lose the bottom N */ \ @@ -407,8 +407,8 @@ register writeback class ARMv7Assembler { public: - typedef ARM::RegisterID RegisterID; - typedef ARM::FPRegisterID FPRegisterID; + typedef ARMRegisters::RegisterID RegisterID; + typedef ARMRegisters::FPRegisterID FPRegisterID; // (HS, LO, HI, LS) -> (AE, B, A, BE) // (VS, VC) -> (O, NO) 
@@ -442,7 +442,6 @@ public: { } - void enableLatePatch() { } private: JmpSrc(int offset) : m_offset(offset) @@ -481,7 +480,7 @@ private: // ARMv7, Appx-A.6.3 bool BadReg(RegisterID reg) { - return (reg == ARM::sp) || (reg == ARM::pc); + return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc); } bool isSingleRegister(FPRegisterID reg) @@ -693,16 +692,16 @@ public: void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { // Rd can only be SP if Rn is also SP. - ASSERT((rd != ARM::sp) || (rn == ARM::sp)); - ASSERT(rd != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isValid()); - if (rn == ARM::sp) { + if (rn == ARMRegisters::sp) { if (!(rd & 8) && imm.isUInt10()) { m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2); return; - } else if ((rd == ARM::sp) && imm.isUInt9()) { + } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) { m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2); return; } @@ -726,9 +725,9 @@ public: void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { - ASSERT((rd != ARM::sp) || (rn == ARM::sp)); - ASSERT(rd != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(!BadReg(rm)); m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } @@ -750,9 +749,9 @@ public: void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { // Rd can only be SP if Rn is also SP. - ASSERT((rd != ARM::sp) || (rn == ARM::sp)); - ASSERT(rd != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isEncodedImm()); if (!((rd | rn) & 8)) { @@ -771,9 +770,9 @@ public: // Not allowed in an IT (if then) block? void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { - ASSERT((rd != ARM::sp) || (rn == ARM::sp)); - ASSERT(rd != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(!BadReg(rm)); m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } @@ -839,7 +838,7 @@ public: // Only allowed in IT (if then) block if last instruction. 
JmpSrc blx(RegisterID rm) { - ASSERT(rm != ARM::pc); + ASSERT(rm != ARMRegisters::pc); m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8); return JmpSrc(m_formatter.size()); } @@ -858,7 +857,7 @@ public: void cmn(RegisterID rn, ARMThumbImmediate imm) { - ASSERT(rn != ARM::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isEncodedImm()); m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm); @@ -866,7 +865,7 @@ public: void cmp(RegisterID rn, ARMThumbImmediate imm) { - ASSERT(rn != ARM::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isEncodedImm()); if (!(rn & 8) && imm.isUInt8()) @@ -877,7 +876,7 @@ public: void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { - ASSERT(rn != ARM::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(!BadReg(rm)); m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm)); } @@ -939,15 +938,15 @@ public: m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if)); } - // rt == ARM::pc only allowed if last instruction in IT (if then) block. + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) { - ASSERT(rn != ARM::pc); // LDR (literal) + ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(imm.isUInt12()); if (!((rt | rn) & 8) && imm.isUInt7()) m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt); - else if ((rn == ARM::sp) && !(rt & 8) && imm.isUInt10()) + else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12()); @@ -966,8 +965,8 @@ public: // if (wback) REG[rn] = _tmp void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) { - ASSERT(rt != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT(rt != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(index || wback); ASSERT(!wback | (rt != rn)); @@ -986,10 +985,10 @@ public: m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset); } - // rt == ARM::pc only allowed if last instruction in IT (if then) block. + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) { - ASSERT(rn != ARM::pc); // LDR (literal) + ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(!BadReg(rm)); ASSERT(shift <= 3); @@ -999,10 +998,10 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm)); } - // rt == ARM::pc only allowed if last instruction in IT (if then) block. + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. 
void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) { - ASSERT(rn != ARM::pc); // LDR (literal) + ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(imm.isUInt12()); if (!((rt | rn) & 8) && imm.isUInt6()) @@ -1024,8 +1023,8 @@ public: // if (wback) REG[rn] = _tmp void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) { - ASSERT(rt != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT(rt != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(index || wback); ASSERT(!wback | (rt != rn)); @@ -1047,7 +1046,7 @@ public: void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) { ASSERT(!BadReg(rt)); // Memory hint - ASSERT(rn != ARM::pc); // LDRH (literal) + ASSERT(rn != ARMRegisters::pc); // LDRH (literal) ASSERT(!BadReg(rm)); ASSERT(shift <= 3); @@ -1198,16 +1197,16 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm)); } - // rt == ARM::pc only allowed if last instruction in IT (if then) block. + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) { - ASSERT(rt != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT(rt != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isUInt12()); if (!((rt | rn) & 8) && imm.isUInt7()) m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt); - else if ((rn == ARM::sp) && !(rt & 8) && imm.isUInt10()) + else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12()); @@ -1226,8 +1225,8 @@ public: // if (wback) REG[rn] = _tmp void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) { - ASSERT(rt != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT(rt != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(index || wback); ASSERT(!wback | (rt != rn)); @@ -1246,10 +1245,10 @@ public: m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset); } - // rt == ARM::pc only allowed if last instruction in IT (if then) block. + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) { - ASSERT(rn != ARM::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(!BadReg(rm)); ASSERT(shift <= 3); @@ -1262,12 +1261,12 @@ public: void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { // Rd can only be SP if Rn is also SP. 
- ASSERT((rd != ARM::sp) || (rn == ARM::sp)); - ASSERT(rd != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isValid()); - if ((rn == ARM::sp) && (rd == ARM::sp) && imm.isUInt9()) { + if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) { m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2); return; } else if (!((rd | rn) & 8)) { @@ -1290,9 +1289,9 @@ public: void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { - ASSERT((rd != ARM::sp) || (rn == ARM::sp)); - ASSERT(rd != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(!BadReg(rm)); m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } @@ -1310,12 +1309,12 @@ public: void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { // Rd can only be SP if Rn is also SP. - ASSERT((rd != ARM::sp) || (rn == ARM::sp)); - ASSERT(rd != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isValid()); - if ((rn == ARM::sp) && (rd == ARM::sp) && imm.isUInt9()) { + if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) { m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2); return; } else if (!((rd | rn) & 8)) { @@ -1334,9 +1333,9 @@ public: // Not allowed in an IT (if then) block? void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { - ASSERT((rd != ARM::sp) || (rn == ARM::sp)); - ASSERT(rd != ARM::pc); - ASSERT(rn != ARM::pc); + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); ASSERT(!BadReg(rm)); m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } @@ -1754,6 +1753,6 @@ private: } // namespace JSC -#endif // ENABLE(ASSEMBLER) && PLATFORM_ARM_ARCH(7) +#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2) #endif // ARMAssembler_h diff --git a/JavaScriptCore/assembler/AbstractMacroAssembler.h b/JavaScriptCore/assembler/AbstractMacroAssembler.h index f927ed2..525fe98 100644 --- a/JavaScriptCore/assembler/AbstractMacroAssembler.h +++ b/JavaScriptCore/assembler/AbstractMacroAssembler.h @@ -320,11 +320,6 @@ public: return Call(jump.m_jmp, Linkable); } - void enableLatePatch() - { - m_jmp.enableLatePatch(); - } - JmpSrc m_jmp; private: Flags m_flags; @@ -361,11 +356,6 @@ public: masm->m_assembler.linkJump(m_jmp, label.m_label); } - void enableLatePatch() - { - m_jmp.enableLatePatch(); - } - private: JmpSrc m_jmp; }; diff --git a/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h index f15b7f3..af3c3be 100644 --- a/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h +++ b/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h @@ -34,6 +34,8 @@ #include "AssemblerBuffer.h" #include <wtf/SegmentedVector.h> +#define ASSEMBLER_HAS_CONSTANT_POOL 1 + namespace JSC { /* @@ -84,7 +86,7 @@ namespace JSC { template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType> class AssemblerBufferWithConstantPool: public AssemblerBuffer { - typedef WTF::SegmentedVector<uint32_t, 512> 
LoadOffsets; + typedef SegmentedVector<uint32_t, 512> LoadOffsets; public: enum { UniqueConst, @@ -177,6 +179,11 @@ public: return AssemblerBuffer::size(); } + int uncheckedSize() + { + return AssemblerBuffer::size(); + } + void* executableCopy(ExecutablePool* allocator) { flushConstantPool(false); @@ -207,10 +214,10 @@ public: } // This flushing mechanism can be called after any unconditional jumps. - void flushWithoutBarrier() + void flushWithoutBarrier(bool isForced = false) { // Flush if constant pool is more than 60% full to avoid overuse of this function. - if (5 * m_numConsts > 3 * maxPoolSize / sizeof(uint32_t)) + if (isForced || 5 * m_numConsts > 3 * maxPoolSize / sizeof(uint32_t)) flushConstantPool(false); } @@ -219,6 +226,11 @@ public: return m_pool; } + int sizeOfConstantPool() + { + return m_numConsts; + } + private: void correctDeltas(int insnSize) { @@ -276,7 +288,8 @@ private: { if (m_numConsts == 0) return; - if ((m_maxDistance < nextInsnSize + m_lastConstDelta + barrierSize + (int)sizeof(uint32_t))) + int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0; + if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t))) flushConstantPool(); } @@ -284,8 +297,8 @@ private: { if (m_numConsts == 0) return; - if ((m_maxDistance < nextInsnSize + m_lastConstDelta + barrierSize + (int)sizeof(uint32_t)) || - (m_numConsts + nextConstSize / sizeof(uint32_t) >= maxPoolSize)) + if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) || + (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize)) flushConstantPool(); } diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h index 9e1c5d3..2743ab4 100644 --- a/JavaScriptCore/assembler/MacroAssembler.h +++ b/JavaScriptCore/assembler/MacroAssembler.h @@ -30,11 +30,11 @@ #if ENABLE(ASSEMBLER) -#if PLATFORM_ARM_ARCH(7) +#if PLATFORM(ARM_THUMB2) #include "MacroAssemblerARMv7.h" namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; -#elif PLATFORM(ARM) +#elif PLATFORM(ARM_TRADITIONAL) #include "MacroAssemblerARM.h" namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }; diff --git a/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/JavaScriptCore/assembler/MacroAssemblerARM.cpp new file mode 100644 index 0000000..d726ecd --- /dev/null +++ b/JavaScriptCore/assembler/MacroAssemblerARM.cpp @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2009 University of Szeged + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL UNIVERSITY OF SZEGED OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL) + +#include "MacroAssemblerARM.h" + +#if PLATFORM(LINUX) +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> +#include <elf.h> +#include <asm/hwcap.h> +#endif + +namespace JSC { + +static bool isVFPPresent() +{ +#if PLATFORM(LINUX) + int fd = open("/proc/self/auxv", O_RDONLY); + if (fd > 0) { + Elf32_auxv_t aux; + while (read(fd, &aux, sizeof(Elf32_auxv_t))) { + if (aux.a_type == AT_HWCAP) { + close(fd); + return aux.a_un.a_val & HWCAP_VFP; + } + } + close(fd); + } +#endif + + return false; +} + +const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent(); + +#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT +void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) +{ + ARMWord op2; + + ASSERT(address.scale >= 0 && address.scale <= 3); + op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale)); + + if (address.offset >= 0 && address.offset + 0x2 <= 0xff) { + m_assembler.add_r(ARMRegisters::S0, address.base, op2); + m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset)); + m_assembler.ldrh_u(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset + 0x2)); + } else if (address.offset < 0 && address.offset >= -0xff) { + m_assembler.add_r(ARMRegisters::S0, address.base, op2); + m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset)); + m_assembler.ldrh_d(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset - 0x2)); + } else { + m_assembler.ldr_un_imm(ARMRegisters::S0, address.offset); + m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, op2); + m_assembler.ldrh_r(dest, address.base, ARMRegisters::S0); + m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::OP2_IMM | 0x2); + m_assembler.ldrh_r(ARMRegisters::S0, address.base, ARMRegisters::S0); + } + m_assembler.orr_r(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16)); +} +#endif + +} + +#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL) diff --git a/JavaScriptCore/assembler/MacroAssemblerARM.h b/JavaScriptCore/assembler/MacroAssemblerARM.h index b04ed13..aa8cbb0 100644 --- a/JavaScriptCore/assembler/MacroAssemblerARM.h +++ b/JavaScriptCore/assembler/MacroAssemblerARM.h @@ -30,7 +30,7 @@ #include <wtf/Platform.h> -#if ENABLE(ASSEMBLER) && PLATFORM(ARM) +#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL) #include "ARMAssembler.h" #include "AbstractMacroAssembler.h" @@ -64,7 +64,7 @@ public: DoubleLessThanOrEqual = ARMAssembler::LE, }; - static const RegisterID stackPointerRegister = ARM::sp; + static const RegisterID stackPointerRegister = ARMRegisters::sp; static const Scale ScalePtr = TimesFour; @@ -75,20 +75,20 @@ public: void add32(Imm32 imm, Address address) { - load32(address, ARM::S1); - add32(imm, ARM::S1); - store32(ARM::S1, address); + load32(address, ARMRegisters::S1); + add32(imm, 
ARMRegisters::S1); + store32(ARMRegisters::S1, address); } void add32(Imm32 imm, RegisterID dest) { - m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0)); + m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } void add32(Address src, RegisterID dest) { - load32(src, ARM::S1); - add32(ARM::S1, dest); + load32(src, ARMRegisters::S1); + add32(ARMRegisters::S1, dest); } void and32(RegisterID src, RegisterID dest) @@ -98,7 +98,7 @@ public: void and32(Imm32 imm, RegisterID dest) { - ARMWord w = m_assembler.getImm(imm.m_value, ARM::S0, true); + ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true); if (w & ARMAssembler::OP2_INV_IMM) m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM); else @@ -118,16 +118,16 @@ public: void mul32(RegisterID src, RegisterID dest) { if (src == dest) { - move(src, ARM::S0); - src = ARM::S0; + move(src, ARMRegisters::S0); + src = ARMRegisters::S0; } m_assembler.muls_r(dest, dest, src); } void mul32(Imm32 imm, RegisterID src, RegisterID dest) { - move(imm, ARM::S0); - m_assembler.muls_r(dest, src, ARM::S0); + move(imm, ARMRegisters::S0); + m_assembler.muls_r(dest, src, ARMRegisters::S0); } void not32(RegisterID dest) @@ -142,7 +142,7 @@ public: void or32(Imm32 imm, RegisterID dest) { - m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0)); + m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } void rshift32(RegisterID shift_amount, RegisterID dest) @@ -162,20 +162,20 @@ public: void sub32(Imm32 imm, RegisterID dest) { - m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0)); + m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } void sub32(Imm32 imm, Address address) { - load32(address, ARM::S1); - sub32(imm, ARM::S1); - store32(ARM::S1, address); + load32(address, ARMRegisters::S1); + sub32(imm, ARMRegisters::S1); + store32(ARMRegisters::S1, address); } void sub32(Address src, RegisterID dest) { - load32(src, ARM::S1); - sub32(ARM::S1, dest); + load32(src, ARMRegisters::S1); + sub32(ARMRegisters::S1, dest); } void xor32(RegisterID src, RegisterID dest) @@ -185,7 +185,7 @@ public: void xor32(Imm32 imm, RegisterID dest) { - m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0)); + m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } void load32(ImplicitAddress address, RegisterID dest) @@ -198,11 +198,20 @@ public: m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } +#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT + void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest); +#else + void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) + { + load32(address, dest); + } +#endif + DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest) { DataLabel32 dataLabel(this); - m_assembler.ldr_un_imm(ARM::S0, 0); - m_assembler.dtr_ur(true, dest, address.base, ARM::S0); + m_assembler.ldr_un_imm(ARMRegisters::S0, 0); + m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0); return dataLabel; } @@ -215,18 +224,18 @@ public: void load16(BaseIndex address, RegisterID dest) { - m_assembler.add_r(ARM::S0, address.base, m_assembler.lsl(address.index, address.scale)); + m_assembler.add_r(ARMRegisters::S0, address.base, m_assembler.lsl(address.index, address.scale)); if (address.offset>=0) - 
m_assembler.ldrh_u(dest, ARM::S0, ARMAssembler::getOp2Byte(address.offset)); + m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset)); else - m_assembler.ldrh_d(dest, ARM::S0, ARMAssembler::getOp2Byte(-address.offset)); + m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset)); } DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) { DataLabel32 dataLabel(this); - m_assembler.ldr_un_imm(ARM::S0, 0); - m_assembler.dtr_ur(false, src, address.base, ARM::S0); + m_assembler.ldr_un_imm(ARMRegisters::S0, 0); + m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0); return dataLabel; } @@ -243,26 +252,26 @@ public: void store32(Imm32 imm, ImplicitAddress address) { if (imm.m_isPointer) - m_assembler.ldr_un_imm(ARM::S1, imm.m_value); + m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value); else - move(imm, ARM::S1); - store32(ARM::S1, address); + move(imm, ARMRegisters::S1); + store32(ARMRegisters::S1, address); } void store32(RegisterID src, void* address) { - m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address)); - m_assembler.dtr_u(false, src, ARM::S0, 0); + m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address)); + m_assembler.dtr_u(false, src, ARMRegisters::S0, 0); } void store32(Imm32 imm, void* address) { - m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address)); + m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address)); if (imm.m_isPointer) - m_assembler.ldr_un_imm(ARM::S1, imm.m_value); + m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value); else - m_assembler.moveImm(imm.m_value, ARM::S1); - m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0); + m_assembler.moveImm(imm.m_value, ARMRegisters::S1); + m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0); } void pop(RegisterID dest) @@ -277,14 +286,14 @@ public: void push(Address address) { - load32(address, ARM::S1); - push(ARM::S1); + load32(address, ARMRegisters::S1); + push(ARMRegisters::S1); } void push(Imm32 imm) { - move(imm, ARM::S0); - push(ARM::S0); + move(imm, ARMRegisters::S0); + push(ARMRegisters::S0); } void move(Imm32 imm, RegisterID dest) @@ -307,9 +316,9 @@ public: void swap(RegisterID reg1, RegisterID reg2) { - m_assembler.mov_r(ARM::S0, reg1); + m_assembler.mov_r(ARMRegisters::S0, reg1); m_assembler.mov_r(reg1, reg2); - m_assembler.mov_r(reg2, ARM::S0); + m_assembler.mov_r(reg2, ARMRegisters::S0); } void signExtend32ToPtr(RegisterID src, RegisterID dest) @@ -324,44 +333,50 @@ public: move(src, dest); } - Jump branch32(Condition cond, RegisterID left, RegisterID right) + Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0) { m_assembler.cmp_r(left, right); - return Jump(m_assembler.jmp(ARMCondition(cond))); + return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool)); } - Jump branch32(Condition cond, RegisterID left, Imm32 right) + Jump branch32(Condition cond, RegisterID left, Imm32 right, int useConstantPool = 0) { if (right.m_isPointer) { - m_assembler.ldr_un_imm(ARM::S0, right.m_value); - m_assembler.cmp_r(left, ARM::S0); + m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value); + m_assembler.cmp_r(left, ARMRegisters::S0); } else - m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARM::S0)); - return Jump(m_assembler.jmp(ARMCondition(cond))); + m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0)); + return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool)); } Jump 
branch32(Condition cond, RegisterID left, Address right) { - load32(right, ARM::S1); - return branch32(cond, left, ARM::S1); + load32(right, ARMRegisters::S1); + return branch32(cond, left, ARMRegisters::S1); } Jump branch32(Condition cond, Address left, RegisterID right) { - load32(left, ARM::S1); - return branch32(cond, ARM::S1, right); + load32(left, ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right); } Jump branch32(Condition cond, Address left, Imm32 right) { - load32(left, ARM::S1); - return branch32(cond, ARM::S1, right); + load32(left, ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right); } Jump branch32(Condition cond, BaseIndex left, Imm32 right) { - load32(left, ARM::S1); - return branch32(cond, ARM::S1, right); + load32(left, ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right); + } + + Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right) + { + load32WithUnalignedHalfWords(left, ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right); } Jump branch16(Condition cond, BaseIndex left, RegisterID right) @@ -375,9 +390,9 @@ public: Jump branch16(Condition cond, BaseIndex left, Imm32 right) { - load16(left, ARM::S0); - move(right, ARM::S1); - m_assembler.cmp_r(ARM::S0, ARM::S1); + load16(left, ARMRegisters::S0); + move(right, ARMRegisters::S1); + m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1); return m_assembler.jmp(ARMCondition(cond)); } @@ -391,9 +406,9 @@ public: Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) { ASSERT((cond == Zero) || (cond == NonZero)); - ARMWord w = m_assembler.getImm(mask.m_value, ARM::S0, true); + ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true); if (w & ARMAssembler::OP2_INV_IMM) - m_assembler.bics_r(ARM::S0, reg, w & ~ARMAssembler::OP2_INV_IMM); + m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM); else m_assembler.tst_r(reg, w); return Jump(m_assembler.jmp(ARMCondition(cond))); @@ -401,14 +416,14 @@ public: Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1)) { - load32(address, ARM::S1); - return branchTest32(cond, ARM::S1, mask); + load32(address, ARMRegisters::S1); + return branchTest32(cond, ARMRegisters::S1, mask); } Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) { - load32(address, ARM::S1); - return branchTest32(cond, ARM::S1, mask); + load32(address, ARMRegisters::S1); + return branchTest32(cond, ARMRegisters::S1, mask); } Jump jump() @@ -418,12 +433,12 @@ public: void jump(RegisterID target) { - move(target, ARM::pc); + move(target, ARMRegisters::pc); } void jump(Address address) { - load32(address, ARM::pc); + load32(address, ARMRegisters::pc); } Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest) @@ -443,11 +458,11 @@ public: void mull32(RegisterID src1, RegisterID src2, RegisterID dest) { if (src1 == dest) { - move(src1, ARM::S0); - src1 = ARM::S0; + move(src1, ARMRegisters::S0); + src1 = ARMRegisters::S0; } - m_assembler.mull_r(ARM::S1, dest, src2, src1); - m_assembler.cmp_r(ARM::S1, m_assembler.asr(dest, 31)); + m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1); + m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31)); } Jump branchMul32(Condition cond, RegisterID src, RegisterID dest) @@ -466,8 +481,8 @@ public: { ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); if (cond == Overflow) { - move(imm, ARM::S0); - mull32(ARM::S0, src, dest); + move(imm, 
ARMRegisters::S0); + mull32(ARMRegisters::S0, src, dest); cond = NonZero; } else @@ -497,13 +512,13 @@ public: Call nearCall() { prepareCall(); - return Call(m_assembler.jmp(), Call::LinkableNear); + return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear); } Call call(RegisterID target) { prepareCall(); - move(ARM::pc, target); + move(ARMRegisters::pc, target); JmpSrc jmpSrc; return Call(jmpSrc, Call::None); } @@ -515,7 +530,7 @@ public: void ret() { - pop(ARM::pc); + pop(ARMRegisters::pc); } void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest) @@ -527,67 +542,67 @@ public: void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest) { - m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARM::S0)); + m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0)); m_assembler.mov_r(dest, ARMAssembler::getOp2(0)); m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond)); } void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest) { - load32(address, ARM::S1); + load32(address, ARMRegisters::S1); if (mask.m_value == -1) - m_assembler.cmp_r(0, ARM::S1); + m_assembler.cmp_r(0, ARMRegisters::S1); else - m_assembler.tst_r(ARM::S1, m_assembler.getImm(mask.m_value, ARM::S0)); + m_assembler.tst_r(ARMRegisters::S1, m_assembler.getImm(mask.m_value, ARMRegisters::S0)); m_assembler.mov_r(dest, ARMAssembler::getOp2(0)); m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond)); } void add32(Imm32 imm, RegisterID src, RegisterID dest) { - m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARM::S0)); + m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } void add32(Imm32 imm, AbsoluteAddress address) { - m_assembler.ldr_un_imm(ARM::S1, reinterpret_cast<ARMWord>(address.m_ptr)); - m_assembler.dtr_u(true, ARM::S1, ARM::S1, 0); - add32(imm, ARM::S1); - m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address.m_ptr)); - m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0); + m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr)); + m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0); + add32(imm, ARMRegisters::S1); + m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr)); + m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0); } void sub32(Imm32 imm, AbsoluteAddress address) { - m_assembler.ldr_un_imm(ARM::S1, reinterpret_cast<ARMWord>(address.m_ptr)); - m_assembler.dtr_u(true, ARM::S1, ARM::S1, 0); - sub32(imm, ARM::S1); - m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address.m_ptr)); - m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0); + m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr)); + m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0); + sub32(imm, ARMRegisters::S1); + m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr)); + m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0); } void load32(void* address, RegisterID dest) { - m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address)); - m_assembler.dtr_u(true, dest, ARM::S0, 0); + m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address)); + m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0); } Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right) { - load32(left.m_ptr, ARM::S1); - return branch32(cond, ARM::S1, right); + load32(left.m_ptr, ARMRegisters::S1); + return branch32(cond, 
ARMRegisters::S1, right); } Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right) { - load32(left.m_ptr, ARM::S1); - return branch32(cond, ARM::S1, right); + load32(left.m_ptr, ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right); } Call call() { prepareCall(); - return Call(m_assembler.jmp(), Call::Linkable); + return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable); } Call tailRecursiveCall() @@ -609,25 +624,23 @@ public: Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) { - dataLabel = moveWithPatch(initialRightValue, ARM::S1); - Jump jump = branch32(cond, left, ARM::S1); - jump.enableLatePatch(); + dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1); + Jump jump = branch32(cond, left, ARMRegisters::S1, true); return jump; } Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) { - load32(left, ARM::S1); - dataLabel = moveWithPatch(initialRightValue, ARM::S0); - Jump jump = branch32(cond, ARM::S0, ARM::S1); - jump.enableLatePatch(); + load32(left, ARMRegisters::S1); + dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0); + Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true); return jump; } DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address) { - DataLabelPtr dataLabel = moveWithPatch(initialValue, ARM::S1); - store32(ARM::S1, address); + DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1); + store32(ARMRegisters::S1, address); return dataLabel; } @@ -639,8 +652,7 @@ public: // Floating point operators bool supportsFloatingPoint() const { - // FIXME: should be a dynamic test: VFP, FPA, or nothing - return false; + return s_isVFPPresent; } bool supportsFloatingPointTruncate() const @@ -665,8 +677,8 @@ public: void addDouble(Address src, FPRegisterID dest) { - loadDouble(src, ARM::SD0); - addDouble(ARM::SD0, dest); + loadDouble(src, ARMRegisters::SD0); + addDouble(ARMRegisters::SD0, dest); } void subDouble(FPRegisterID src, FPRegisterID dest) @@ -676,8 +688,8 @@ public: void subDouble(Address src, FPRegisterID dest) { - loadDouble(src, ARM::SD0); - subDouble(ARM::SD0, dest); + loadDouble(src, ARMRegisters::SD0); + subDouble(ARMRegisters::SD0, dest); } void mulDouble(FPRegisterID src, FPRegisterID dest) @@ -687,8 +699,8 @@ public: void mulDouble(Address src, FPRegisterID dest) { - loadDouble(src, ARM::SD0); - mulDouble(ARM::SD0, dest); + loadDouble(src, ARMRegisters::SD0); + mulDouble(ARMRegisters::SD0, dest); } void convertInt32ToDouble(RegisterID src, FPRegisterID dest) @@ -722,46 +734,56 @@ protected: return static_cast<ARMAssembler::Condition>(cond); } + void ensureSpace(int insnSpace, int constSpace) + { + m_assembler.ensureSpace(insnSpace, constSpace); + } + + int sizeOfConstantPool() + { + return m_assembler.sizeOfConstantPool(); + } + void prepareCall() { - m_assembler.ensureSpace(3 * sizeof(ARMWord), sizeof(ARMWord)); + ensureSpace(3 * sizeof(ARMWord), sizeof(ARMWord)); // S0 might be used for parameter passing - m_assembler.add_r(ARM::S1, ARM::pc, ARMAssembler::OP2_IMM | 0x4); - m_assembler.push_r(ARM::S1); + m_assembler.add_r(ARMRegisters::S1, ARMRegisters::pc, ARMAssembler::OP2_IMM | 0x4); + m_assembler.push_r(ARMRegisters::S1); } void call32(RegisterID base, int32_t offset) { - if (base == ARM::sp) + if (base == ARMRegisters::sp) offset += 4; if (offset >= 0) { if (offset <= 0xfff) { prepareCall(); - m_assembler.dtr_u(true, 
ARM::pc, base, offset); + m_assembler.dtr_u(true, ARMRegisters::pc, base, offset); } else if (offset <= 0xfffff) { - m_assembler.add_r(ARM::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8)); + m_assembler.add_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8)); prepareCall(); - m_assembler.dtr_u(true, ARM::pc, ARM::S0, offset & 0xfff); + m_assembler.dtr_u(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff); } else { - ARMWord reg = m_assembler.getImm(offset, ARM::S0); + ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0); prepareCall(); - m_assembler.dtr_ur(true, ARM::pc, base, reg); + m_assembler.dtr_ur(true, ARMRegisters::pc, base, reg); } } else { offset = -offset; if (offset <= 0xfff) { prepareCall(); - m_assembler.dtr_d(true, ARM::pc, base, offset); + m_assembler.dtr_d(true, ARMRegisters::pc, base, offset); } else if (offset <= 0xfffff) { - m_assembler.sub_r(ARM::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8)); + m_assembler.sub_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8)); prepareCall(); - m_assembler.dtr_d(true, ARM::pc, ARM::S0, offset & 0xfff); + m_assembler.dtr_d(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff); } else { - ARMWord reg = m_assembler.getImm(offset, ARM::S0); + ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0); prepareCall(); - m_assembler.dtr_dr(true, ARM::pc, base, reg); + m_assembler.dtr_dr(true, ARMRegisters::pc, base, reg); } } } @@ -785,10 +807,11 @@ private: ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); } + static const bool s_isVFPPresent; }; } -#endif +#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL) #endif // MacroAssemblerARM_h diff --git a/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/JavaScriptCore/assembler/MacroAssemblerARMv7.h index f7a8402..a549604 100644 --- a/JavaScriptCore/assembler/MacroAssemblerARMv7.h +++ b/JavaScriptCore/assembler/MacroAssemblerARMv7.h @@ -39,9 +39,9 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> { // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7? // - dTR is likely used more than aTR, and we'll get better instruction // encoding if it's in the low 8 registers. 
- static const ARM::RegisterID dataTempRegister = ARM::ip; - static const RegisterID addressTempRegister = ARM::r3; - static const FPRegisterID fpTempRegister = ARM::d7; + static const ARMRegisters::RegisterID dataTempRegister = ARMRegisters::ip; + static const RegisterID addressTempRegister = ARMRegisters::r3; + static const FPRegisterID fpTempRegister = ARMRegisters::d7; struct ArmAddress { enum AddressType { @@ -102,8 +102,8 @@ public: DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS, }; - static const RegisterID stackPointerRegister = ARM::sp; - static const RegisterID linkRegister = ARM::lr; + static const RegisterID stackPointerRegister = ARMRegisters::sp; + static const RegisterID linkRegister = ARMRegisters::lr; // Integer arithmetic operations: // @@ -375,6 +375,11 @@ public: load32(setupArmAddress(address), dest); } + void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) + { + load32(setupArmAddress(address), dest); + } + void load32(void* address, RegisterID dest) { move(ImmPtr(address), addressTempRegister); @@ -532,6 +537,7 @@ public: Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID) { ASSERT_NOT_REACHED(); + return jump(); } @@ -546,13 +552,13 @@ public: void pop(RegisterID dest) { // store postindexed with writeback - m_assembler.ldr(dest, ARM::sp, sizeof(void*), false, true); + m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true); } void push(RegisterID src) { // store preindexed with writeback - m_assembler.str(src, ARM::sp, -sizeof(void*), true, true); + m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true); } void push(Address address) @@ -716,6 +722,13 @@ public: return branch32(cond, addressTempRegister, right); } + Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right) + { + // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/ + load32WithUnalignedHalfWords(left, addressTempRegister); + return branch32(cond, addressTempRegister, right); + } + Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right) { load32(left.m_ptr, dataTempRegister); @@ -1038,7 +1051,7 @@ protected: return addressTempRegister; } - DataLabel32 moveFixedWidthEncoding(Imm32 imm, RegisterID dst) + void moveFixedWidthEncoding(Imm32 imm, RegisterID dst) { uint32_t value = imm.m_value; m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff)); diff --git a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h index 341a7ff..568260a 100644 --- a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h +++ b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h @@ -37,7 +37,7 @@ // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid // instruction address on the platform (for example, check any alignment requirements). -#if PLATFORM_ARM_ARCH(7) +#if PLATFORM(ARM_THUMB2) // ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded // into the processor are decorated with the bottom bit set, indicating that this is // thumb code (as oposed to 32-bit traditional ARM). The first test checks for both @@ -124,7 +124,7 @@ public: } explicit MacroAssemblerCodePtr(void* value) -#if PLATFORM_ARM_ARCH(7) +#if PLATFORM(ARM_THUMB2) // Decorate the pointer as a thumb code pointer. 
diff --git a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 341a7ff..568260a 100644
--- a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -37,7 +37,7 @@
 // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
 // instruction address on the platform (for example, check any alignment requirements).
-#if PLATFORM_ARM_ARCH(7)
+#if PLATFORM(ARM_THUMB2)
 // ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
 // into the processor are decorated with the bottom bit set, indicating that this is
 // thumb code (as oposed to 32-bit traditional ARM). The first test checks for both
@@ -124,7 +124,7 @@ public:
     }
     explicit MacroAssemblerCodePtr(void* value)
-#if PLATFORM_ARM_ARCH(7)
+#if PLATFORM(ARM_THUMB2)
         // Decorate the pointer as a thumb code pointer.
         : m_value(reinterpret_cast<char*>(value) + 1)
 #else
@@ -141,7 +141,7 @@ public:
     }
     void* executableAddress() const { return m_value; }
-#if PLATFORM_ARM_ARCH(7)
+#if PLATFORM(ARM_THUMB2)
     // To use this pointer as a data address remove the decoration.
     void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
 #else
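MacroAssemblerCodePtr stores Thumb code pointers with bit 0 set and strips the bit again in dataLocation(). A minimal sketch of that decoration round-trip, using a local buffer in place of real JIT memory:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        alignas(2) static uint16_t code[2] = { 0x4770, 0xbf00 };  // stand-in for 16-bit aligned Thumb code

        // Executable pointers carry bit 0 set, requesting Thumb state on a branch through them.
        void* executableAddress = reinterpret_cast<char*>(code) + 1;
        // Data accesses to the same instructions must remove the decoration again.
        void* dataLocation = reinterpret_cast<char*>(executableAddress) - 1;

        assert(reinterpret_cast<uintptr_t>(executableAddress) & 1);   // decorated: odd address
        assert(!(reinterpret_cast<uintptr_t>(dataLocation) & 1));     // undecorated: 16-bit aligned
        assert(dataLocation == static_cast<void*>(code));
        return 0;
    }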
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index c9e3569..5ebefa7 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -64,7 +64,7 @@ public:
         DoubleLessThanOrEqual = X86Assembler::ConditionBE,
     };
-    static const RegisterID stackPointerRegister = X86::esp;
+    static const RegisterID stackPointerRegister = X86Registers::esp;
     // Integer arithmetic operations:
     //
@@ -132,20 +132,20 @@ public:
     {
         // On x86 we can only shift by ecx; if asked to shift by another register we'll
         // need rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86::ecx) {
-            swap(shift_amount, X86::ecx);
+        if (shift_amount != X86Registers::ecx) {
+            swap(shift_amount, X86Registers::ecx);
             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
             if (dest == shift_amount)
-                m_assembler.shll_CLr(X86::ecx);
+                m_assembler.shll_CLr(X86Registers::ecx);
             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86::ecx)
+            else if (dest == X86Registers::ecx)
                 m_assembler.shll_CLr(shift_amount);
             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
             else
                 m_assembler.shll_CLr(dest);
-            swap(shift_amount, X86::ecx);
+            swap(shift_amount, X86Registers::ecx);
         } else
             m_assembler.shll_CLr(dest);
     }
@@ -214,20 +214,20 @@ public:
     {
         // On x86 we can only shift by ecx; if asked to shift by another register we'll
         // need rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86::ecx) {
-            swap(shift_amount, X86::ecx);
+        if (shift_amount != X86Registers::ecx) {
+            swap(shift_amount, X86Registers::ecx);
             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
             if (dest == shift_amount)
-                m_assembler.sarl_CLr(X86::ecx);
+                m_assembler.sarl_CLr(X86Registers::ecx);
             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86::ecx)
+            else if (dest == X86Registers::ecx)
                 m_assembler.sarl_CLr(shift_amount);
             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
             else
                 m_assembler.sarl_CLr(dest);
-            swap(shift_amount, X86::ecx);
+            swap(shift_amount, X86Registers::ecx);
         } else
             m_assembler.sarl_CLr(dest);
     }
@@ -306,6 +306,11 @@ public:
         m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
     }
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
     {
         m_assembler.movl_mr_disp32(address.offset, address.base, dest);
@@ -499,10 +504,7 @@ public:
     void move(ImmPtr imm, RegisterID dest)
     {
-        if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
-            m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
-        else
-            m_assembler.movq_i64r(imm.asIntptr(), dest);
+        m_assembler.movq_i64r(imm.asIntptr(), dest);
     }
     void swap(RegisterID reg1, RegisterID reg2)
@@ -607,6 +609,11 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
+    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+    {
+        return branch32(cond, left, right);
+    }
+
     Jump branch16(Condition cond, BaseIndex left, RegisterID right)
     {
         m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
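The lshift32/rshift32 bodies above work around x86's rule that a variable shift count must sit in %ecx: the count is xchg'd into %ecx, the shift is applied to whichever register now holds the destination, and the xchg is undone. A value-level sketch of the common case where the destination is neither the count register nor %ecx (register contents are arbitrary examples, not taken from the patch):

    #include <cassert>
    #include <cstdint>
    #include <utility>

    int main()
    {
        // Model registers as variables: eax holds the shift amount, ebx the value to shift.
        uint32_t eax = 3, ebx = 0x80, ecx = 7;
        uint32_t expected = ebx << eax;          // what "shll %eax, %ebx" is meant to compute

        std::swap(eax, ecx);                     // xchgl %eax, %ecx
        ebx <<= (ecx & 31);                      // shll %cl, %ebx (hardware masks the count to 5 bits)
        std::swap(eax, ecx);                     // xchgl %eax, %ecx

        assert(ebx == expected && eax == 3 && ecx == 7);  // result correct, registers restored
        return 0;
    }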
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index e3d296c..0f95fe6 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -38,7 +38,7 @@ namespace JSC {
 class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
 protected:
-    static const X86::RegisterID scratchRegister = X86::r11;
+    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
 public:
     static const Scale ScalePtr = TimesEight;
@@ -79,12 +79,12 @@ public:
     void load32(void* address, RegisterID dest)
     {
-        if (dest == X86::eax)
+        if (dest == X86Registers::eax)
             m_assembler.movl_mEAX(address);
         else {
-            move(X86::eax, dest);
+            move(X86Registers::eax, dest);
             m_assembler.movl_mEAX(address);
-            swap(X86::eax, dest);
+            swap(X86Registers::eax, dest);
         }
     }
@@ -102,10 +102,10 @@ public:
     void store32(Imm32 imm, void* address)
     {
-        move(X86::eax, scratchRegister);
-        move(imm, X86::eax);
+        move(X86Registers::eax, scratchRegister);
+        move(imm, X86Registers::eax);
         m_assembler.movl_EAXm(address);
-        move(scratchRegister, X86::eax);
+        move(scratchRegister, X86Registers::eax);
     }
     Call call()
@@ -196,20 +196,20 @@ public:
     {
         // On x86 we can only shift by ecx; if asked to shift by another register we'll
         // need rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86::ecx) {
-            swap(shift_amount, X86::ecx);
+        if (shift_amount != X86Registers::ecx) {
+            swap(shift_amount, X86Registers::ecx);
             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
             if (dest == shift_amount)
-                m_assembler.sarq_CLr(X86::ecx);
+                m_assembler.sarq_CLr(X86Registers::ecx);
             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86::ecx)
+            else if (dest == X86Registers::ecx)
                 m_assembler.sarq_CLr(shift_amount);
             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
             else
                 m_assembler.sarq_CLr(dest);
-            swap(shift_amount, X86::ecx);
+            swap(shift_amount, X86Registers::ecx);
         } else
             m_assembler.sarq_CLr(dest);
     }
@@ -258,12 +258,12 @@ public:
     void loadPtr(void* address, RegisterID dest)
     {
-        if (dest == X86::eax)
+        if (dest == X86Registers::eax)
             m_assembler.movq_mEAX(address);
         else {
-            move(X86::eax, dest);
+            move(X86Registers::eax, dest);
             m_assembler.movq_mEAX(address);
-            swap(X86::eax, dest);
+            swap(X86Registers::eax, dest);
         }
     }
@@ -285,24 +285,19 @@ public:
     void storePtr(RegisterID src, void* address)
     {
-        if (src == X86::eax)
+        if (src == X86Registers::eax)
             m_assembler.movq_EAXm(address);
         else {
-            swap(X86::eax, src);
+            swap(X86Registers::eax, src);
             m_assembler.movq_EAXm(address);
-            swap(X86::eax, src);
+            swap(X86Registers::eax, src);
         }
     }
     void storePtr(ImmPtr imm, ImplicitAddress address)
     {
-        intptr_t ptr = imm.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(ptr))
-            m_assembler.movq_i32m(static_cast<int>(ptr), address.offset, address.base);
-        else {
-            move(imm, scratchRegister);
-            storePtr(scratchRegister, address);
-        }
+        move(imm, scratchRegister);
+        storePtr(scratchRegister, address);
     }
     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
@@ -339,17 +334,8 @@ public:
     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
     {
-        intptr_t imm = right.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(imm)) {
-            if (!imm)
-                m_assembler.testq_rr(left, left);
-            else
-                m_assembler.cmpq_ir(imm, left);
-            return Jump(m_assembler.jCC(x86Condition(cond)));
-        } else {
-            move(right, scratchRegister);
-            return branchPtr(cond, left, scratchRegister);
-        }
+        move(right, scratchRegister);
+        return branchPtr(cond, left, scratchRegister);
     }
     Jump branchPtr(Condition cond, RegisterID left, Address right)
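The storePtr(ImmPtr) and branchPtr(..., ImmPtr) changes above drop the CAN_SIGN_EXTEND_32_64 fast path and always go through the scratch register with a full 64-bit immediate. A small sketch of what the removed predicate tested, written with int64_t for portability and with made-up values:

    #include <cassert>
    #include <cstdint>

    // Same shape as the helper removed from X86Assembler.h: true when a 64-bit value
    // survives a round-trip through a sign-extended 32-bit immediate.
    static bool canSignExtend32To64(int64_t value)
    {
        return value == static_cast<int64_t>(static_cast<int32_t>(value));
    }

    int main()
    {
        assert(canSignExtend32To64(0x7fffffff));                 // fits in a sign-extended imm32
        assert(canSignExtend32To64(-1));                          // negative values extend correctly
        assert(!canSignExtend32To64(INT64_C(0x123456789a)));      // needs the full 64-bit immediate path
        return 0;
    }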
diff --git a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h
index fb58361..cbbaaa5 100644
--- a/JavaScriptCore/assembler/X86Assembler.h
+++ b/JavaScriptCore/assembler/X86Assembler.h
@@ -38,12 +38,8 @@ namespace JSC {
 inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
-#if PLATFORM(X86_64)
-inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
-inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
-#endif
-namespace X86 {
+namespace X86Registers {
     typedef enum {
         eax,
         ecx,
@@ -80,8 +76,8 @@ namespace X86 {
 class X86Assembler {
 public:
-    typedef X86::RegisterID RegisterID;
-    typedef X86::XMMRegisterID XMMRegisterID;
+    typedef X86Registers::RegisterID RegisterID;
+    typedef X86Registers::XMMRegisterID XMMRegisterID;
     typedef XMMRegisterID FPRegisterID;
     typedef enum {
@@ -231,7 +227,6 @@ public:
         {
         }
-        void enableLatePatch() { }
     private:
         JmpSrc(int offset)
             : m_offset(offset)
@@ -1119,7 +1114,7 @@ public:
 #else
     void movl_rm(RegisterID src, void* addr)
     {
-        if (src == X86::eax)
+        if (src == X86Registers::eax)
             movl_EAXm(addr);
         else
             m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
@@ -1127,7 +1122,7 @@ public:
     void movl_mr(void* addr, RegisterID dst)
     {
-        if (dst == X86::eax)
+        if (dst == X86Registers::eax)
             movl_mEAX(addr);
         else
             m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
@@ -1893,23 +1888,23 @@ private:
     // Internals; ModRm and REX formatters.
-    static const RegisterID noBase = X86::ebp;
-    static const RegisterID hasSib = X86::esp;
-    static const RegisterID noIndex = X86::esp;
+    static const RegisterID noBase = X86Registers::ebp;
+    static const RegisterID hasSib = X86Registers::esp;
+    static const RegisterID noIndex = X86Registers::esp;
 #if PLATFORM(X86_64)
-    static const RegisterID noBase2 = X86::r13;
-    static const RegisterID hasSib2 = X86::r12;
+    static const RegisterID noBase2 = X86Registers::r13;
+    static const RegisterID hasSib2 = X86Registers::r12;
     // Registers r8 & above require a REX prefixe.
     inline bool regRequiresRex(int reg)
     {
-        return (reg >= X86::r8);
+        return (reg >= X86Registers::r8);
     }
     // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
     inline bool byteRegRequiresRex(int reg)
     {
-        return (reg >= X86::esp);
+        return (reg >= X86Registers::esp);
     }
     // Format a REX prefix byte.
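regRequiresRex() and byteRegRequiresRex() above decide when a REX prefix must be emitted: r8-r15 need their high bit carried in REX.R/X/B, and byte access to spl/bpl/sil/dil needs some REX prefix so the encoding is not read as ah/ch/dh/bh. A sketch of how such a prefix byte could be assembled (0x40 plus the W/R/X/B bits); the helper and register numbering here are illustrative, not the emitter's actual code:

    #include <cassert>
    #include <cstdint>

    // REX = 0100WRXB: W selects 64-bit operand size, R/X/B carry bit 3 of the
    // ModRM reg field, the SIB index and the ModRM rm/base field respectively.
    static uint8_t rexPrefix(bool w, int reg, int index, int rm)
    {
        return 0x40 | (w << 3) | (((reg >> 3) & 1) << 2) | (((index >> 3) & 1) << 1) | ((rm >> 3) & 1);
    }

    int main()
    {
        enum { eax = 0, esp = 4, ebp = 5, r8 = 8, r12 = 12, r13 = 13 };

        assert(rexPrefix(true, eax, eax, eax) == 0x48);   // e.g. movq %rax, %rax: only REX.W needed
        assert(rexPrefix(false, eax, eax, r8) == 0x41);   // rm is r8: REX.B set
        assert(rexPrefix(true, r13, eax, r12) == 0x4d);   // REX.W + REX.R (r13) + REX.B (r12)
        return 0;
    }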