author     Steve Block <steveblock@google.com>       2010-02-02 14:57:50 +0000
committer  Steve Block <steveblock@google.com>       2010-02-04 15:06:55 +0000
commit     d0825bca7fe65beaee391d30da42e937db621564 (patch)
tree       7461c49eb5844ffd1f35d1ba2c8b7584c1620823 /JavaScriptCore/assembler
parent     3db770bd97c5a59b6c7574ca80a39e5a51c1defd (diff)
Merge webkit.org at r54127 : Initial merge by git
Change-Id: Ib661abb595522f50ea406f72d3a0ce17f7193c82
Diffstat (limited to 'JavaScriptCore/assembler')
 JavaScriptCore/assembler/ARMAssembler.cpp          | 58
 JavaScriptCore/assembler/ARMAssembler.h            | 49
 JavaScriptCore/assembler/ARMv7Assembler.h          | 12
 JavaScriptCore/assembler/AbstractMacroAssembler.h  |  8
 JavaScriptCore/assembler/MacroAssembler.h          | 13
 JavaScriptCore/assembler/MacroAssemblerARM.cpp     | 11
 JavaScriptCore/assembler/MacroAssemblerARM.h       |  6
 JavaScriptCore/assembler/MacroAssemblerCodeRef.h   |  6
 JavaScriptCore/assembler/MacroAssemblerX86.h       |  2
 JavaScriptCore/assembler/MacroAssemblerX86Common.h | 12
 JavaScriptCore/assembler/MacroAssemblerX86_64.h    |  2
 JavaScriptCore/assembler/X86Assembler.h            | 64
12 files changed, 118 insertions, 125 deletions
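
Note: the bulk of this merge is mechanical — single-argument PLATFORM() guards become
CPU() or OS() guards, and raw ARM_ARCH_VERSION comparisons become WTF_ARM_ARCH_AT_LEAST().
For readers unfamiliar with the convention, the following is a minimal sketch of the
pattern behind these macros, simplified from the wtf/Platform.h of this era; the
guard-setting logic shown is illustrative and should be checked against the real header.

    /* Sketch of the WTF query-macro pattern (simplified). Each query expands
       its argument into a WTF_-prefixed guard, so CPU(X86_64) is true only
       when the build has defined WTF_CPU_X86_64. This relies on the
       preprocessor accepting a `defined` produced by macro expansion, which
       is formally undefined behavior but accepted by the compilers WebKit
       targeted at the time. */
    #define CPU(WTF_FEATURE) (defined WTF_CPU_##WTF_FEATURE && WTF_CPU_##WTF_FEATURE)
    #define OS(WTF_FEATURE) (defined WTF_OS_##WTF_FEATURE && WTF_OS_##WTF_FEATURE)

    /* Guards are derived from compiler-predefined macros, e.g.: */
    #if defined(__x86_64__) || defined(_M_X64)
    #define WTF_CPU_X86_64 1
    #endif

    /* The architecture-version check used throughout the ARM assembler: */
    #define WTF_ARM_ARCH_AT_LEAST(N) (CPU(ARM) && WTF_ARM_ARCH_VERSION >= N)

    /* Use sites then read as in the hunks below: */
    #if CPU(X86_64)
    /* 64-bit-only code path */
    #endif

The point of the split is that each condition now names what it actually tests:
CPU(ARM_TRADITIONAL) describes the processor and OS(LINUX) the operating system,
where the old PLATFORM() conflated the two.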
diff --git a/JavaScriptCore/assembler/ARMAssembler.cpp b/JavaScriptCore/assembler/ARMAssembler.cpp
index 81c3222..6dd2b87 100644
--- a/JavaScriptCore/assembler/ARMAssembler.cpp
+++ b/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -26,7 +26,7 @@
 
 #include "config.h"
 
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
 
 #include "ARMAssembler.h"
 
@@ -34,39 +34,6 @@ namespace JSC {
 
 // Patching helpers
 
-ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
-{
-    // Must be an ldr ..., [pc +/- imm]
-    ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
-
-    if (constPool && (*insn & 0x1))
-        return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
-
-    ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
-    if (*insn & DT_UP)
-        return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
-    else
-        return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
-}
-
-void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to, int useConstantPool)
-{
-    ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));
-
-    if (!useConstantPool) {
-        int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);
-
-        if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
-            *insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
-            ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
-            return;
-        }
-    }
-    ARMWord* addr = getLdrImmAddress(insn);
-    *addr = reinterpret_cast<ARMWord>(to);
-    ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
-}
-
 void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
 {
     ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
@@ -272,10 +239,8 @@ void ARMAssembler::moveImm(ARMWord imm, int dest)
 
 ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
 {
-    ARMWord tmp;
-
-#if ARM_ARCH_VERSION >= 7
-    tmp = getImm16Op2(imm);
+#if WTF_ARM_ARCH_AT_LEAST(7)
+    ARMWord tmp = getImm16Op2(imm);
     if (tmp != INVALID_IMM) {
         movw_r(dest, tmp);
         return dest;
@@ -390,10 +355,17 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator)
         // The last bit is set if the constant must be placed on constant pool.
         int pos = (*iter) & (~0x1);
         ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
-        ARMWord offset = *getLdrImmAddress(ldrAddr);
-        if (offset != 0xffffffff) {
-            JmpSrc jmpSrc(pos);
-            linkBranch(data, jmpSrc, data + offset, ((*iter) & 1));
+        ARMWord* addr = getLdrImmAddress(ldrAddr);
+        if (*addr != 0xffffffff) {
+            if (!(*iter & 1)) {
+                int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
+
+                if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
+                    *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
+                    continue;
+                }
+            }
+            *addr = reinterpret_cast<ARMWord>(data + *addr);
         }
     }
@@ -402,4 +374,4 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator)
 
 } // namespace JSC
 
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/JavaScriptCore/assembler/ARMAssembler.h b/JavaScriptCore/assembler/ARMAssembler.h
index 712473e..6967b37 100644
--- a/JavaScriptCore/assembler/ARMAssembler.h
+++ b/JavaScriptCore/assembler/ARMAssembler.h
@@ -29,7 +29,7 @@
 
 #include <wtf/Platform.h>
 
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
 
 #include "AssemblerBufferWithConstantPool.h"
 #include <wtf/Assertions.h>
@@ -138,11 +138,11 @@ namespace JSC {
            FSITOD = 0x0eb80bc0,
            FTOSID = 0x0ebd0b40,
            FMSTAT = 0x0ef1fa10,
-#if ARM_ARCH_VERSION >= 5
+#if WTF_ARM_ARCH_AT_LEAST(5)
            CLZ = 0x016f0f10,
            BKPT = 0xe120070,
 #endif
-#if ARM_ARCH_VERSION >= 7
+#if WTF_ARM_ARCH_AT_LEAST(7)
            MOVW = 0x03000000,
            MOVT = 0x03400000,
 #endif
@@ -183,6 +183,7 @@ namespace JSC {
        };
 
        static const ARMWord INVALID_IMM = 0xf0000000;
+        static const int DefaultPrefetching = 2;
 
        class JmpSrc {
            friend class ARMAssembler;
@@ -342,7 +343,7 @@ namespace JSC {
            emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARMRegisters::r0, op2);
        }
 
-#if ARM_ARCH_VERSION >= 7
+#if WTF_ARM_ARCH_AT_LEAST(7)
        void movw_r(int rd, ARMWord op2, Condition cc = AL)
        {
            ASSERT((op2 | 0xf0fff) == 0xf0fff);
@@ -530,7 +531,7 @@ namespace JSC {
            m_buffer.putInt(static_cast<ARMWord>(cc) | FMSTAT);
        }
 
-#if ARM_ARCH_VERSION >= 5
+#if WTF_ARM_ARCH_AT_LEAST(5)
        void clz_r(int rd, int rm, Condition cc = AL)
        {
            m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm));
@@ -539,7 +540,7 @@ namespace JSC {
 
        void bkpt(ARMWord value)
        {
-#if ARM_ARCH_VERSION >= 5
+#if WTF_ARM_ARCH_AT_LEAST(5)
            m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
 #else
            // Cannot access to Zero memory address
@@ -632,15 +633,32 @@ namespace JSC {
 
        // Patching helpers
 
-        static ARMWord* getLdrImmAddress(ARMWord* insn, uint32_t* constPool = 0);
-        static void linkBranch(void* code, JmpSrc from, void* to, int useConstantPool = 0);
+        static ARMWord* getLdrImmAddress(ARMWord* insn)
+        {
+            // Must be an ldr ..., [pc +/- imm]
+            ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
+
+            ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetching * sizeof(ARMWord);
+            if (*insn & DT_UP)
+                return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
+            return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
+        }
+
+        static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
+        {
+            // Must be an ldr ..., [pc +/- imm]
+            ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
+
+            if (*insn & 0x1)
+                return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
+            return getLdrImmAddress(insn);
+        }
 
        static void patchPointerInternal(intptr_t from, void* to)
        {
            ARMWord* insn = reinterpret_cast<ARMWord*>(from);
            ARMWord* addr = getLdrImmAddress(insn);
            *addr = reinterpret_cast<ARMWord>(to);
-            ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
        }
 
        static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
@@ -685,12 +703,13 @@ namespace JSC {
        void linkJump(JmpSrc from, JmpDst to)
        {
            ARMWord* insn = reinterpret_cast<ARMWord*>(m_buffer.data()) + (from.m_offset / sizeof(ARMWord));
-            *getLdrImmAddress(insn, m_buffer.poolAddress()) = static_cast<ARMWord>(to.m_offset);
+            ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
+            *addr = static_cast<ARMWord>(to.m_offset);
        }
 
        static void linkJump(void* code, JmpSrc from, void* to)
        {
-            linkBranch(code, from, to);
+            patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
        }
 
        static void relinkJump(void* from, void* to)
@@ -700,12 +719,12 @@ namespace JSC {
 
        static void linkCall(void* code, JmpSrc from, void* to)
        {
-            linkBranch(code, from, to, true);
+            patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
        }
 
        static void relinkCall(void* from, void* to)
        {
-            relinkJump(from, to);
+            patchPointerInternal(reinterpret_cast<intptr_t>(from) - sizeof(ARMWord), to);
        }
 
        // Address operations
@@ -747,7 +766,7 @@ namespace JSC {
 
        static ARMWord getOp2(ARMWord imm);
 
-#if ARM_ARCH_VERSION >= 7
+#if WTF_ARM_ARCH_AT_LEAST(7)
        static ARMWord getImm16Op2(ARMWord imm)
        {
            if (imm <= 0xffff)
@@ -812,6 +831,6 @@ namespace JSC {
 
 } // namespace JSC
 
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
 
 #endif // ARMAssembler_h
diff --git a/JavaScriptCore/assembler/ARMv7Assembler.h b/JavaScriptCore/assembler/ARMv7Assembler.h
index e253a53..4e394b2 100644
--- a/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -28,7 +28,7 @@
 
 #include <wtf/Platform.h>
 
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
 
 #include "AssemblerBuffer.h"
 #include <wtf/Assertions.h>
@@ -201,10 +201,10 @@ class ARMThumbImmediate {
 
    ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
    {
-        if (value & ~((1<<N)-1)) /* check for any of the top N bits (of 2N bits) are set */ \
-            value >>= N;         /* if any were set, lose the bottom N */ \
-        else                     /* if none of the top N bits are set, */ \
-            zeros += N;          /* then we have identified N leading zeros */
+        if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
+            value >>= N; /* if any were set, lose the bottom N */
+        else /* if none of the top N bits are set, */
+            zeros += N; /* then we have identified N leading zeros */
    }
 
    static int32_t countLeadingZeros(uint32_t value)
@@ -1832,6 +1832,6 @@ private:
 
 } // namespace JSC
 
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
 
 #endif // ARMAssembler_h
diff --git a/JavaScriptCore/assembler/AbstractMacroAssembler.h b/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 525fe98..198e8d1 100644
--- a/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -173,16 +173,16 @@ public:
    struct Imm32 {
        explicit Imm32(int32_t value)
            : m_value(value)
-#if PLATFORM(ARM)
+#if CPU(ARM)
            , m_isPointer(false)
 #endif
        {
        }
 
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
        explicit Imm32(ImmPtr ptr)
            : m_value(ptr.asIntptr())
-#if PLATFORM(ARM)
+#if CPU(ARM)
            , m_isPointer(true)
 #endif
        {
@@ -190,7 +190,7 @@ public:
 #endif
 
        int32_t m_value;
-#if PLATFORM(ARM)
+#if CPU(ARM)
        // We rely on being able to regenerate code to recover exception handling
        // information.  Since ARMv7 supports 16-bit immediates there is a danger
        // that if pointer values change the layout of the generated code will change.
diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h
index 858707d..76bd205 100644
--- a/JavaScriptCore/assembler/MacroAssembler.h
+++ b/JavaScriptCore/assembler/MacroAssembler.h
@@ -30,19 +30,19 @@
 
 #if ENABLE(ASSEMBLER)
 
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
 #include "MacroAssemblerARMv7.h"
 namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
 
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
 #include "MacroAssemblerARM.h"
 namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
 
-#elif PLATFORM(X86)
+#elif CPU(X86)
 #include "MacroAssemblerX86.h"
 namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
 
-#elif PLATFORM(X86_64)
+#elif CPU(X86_64)
 #include "MacroAssemblerX86_64.h"
 namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
 
@@ -60,7 +60,7 @@ public:
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::branch16;
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    using MacroAssemblerBase::branchPtr;
    using MacroAssemblerBase::branchTestPtr;
 #endif
@@ -133,7 +133,8 @@ public:
    // Ptr methods
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
-#if !PLATFORM(X86_64)
+    // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !CPU(X86_64)
    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
diff --git a/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/JavaScriptCore/assembler/MacroAssemblerARM.cpp
index d726ecd..b5b20fa 100644
--- a/JavaScriptCore/assembler/MacroAssemblerARM.cpp
+++ b/JavaScriptCore/assembler/MacroAssemblerARM.cpp
@@ -26,11 +26,11 @@
 
 #include "config.h"
 
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
 
 #include "MacroAssemblerARM.h"
 
-#if PLATFORM(LINUX)
+#if OS(LINUX)
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
@@ -43,7 +43,7 @@ namespace JSC {
 
 static bool isVFPPresent()
 {
-#if PLATFORM(LINUX)
+#if OS(LINUX)
    int fd = open("/proc/self/auxv", O_RDONLY);
    if (fd > 0) {
        Elf32_auxv_t aux;
@@ -62,7 +62,8 @@ static bool isVFPPresent()
 
 const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
 
-#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT
+#if CPU(ARMV5_OR_LOWER)
+/* On ARMv5 and below, natural alignment is required. */
 void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
 {
    ARMWord op2;
@@ -91,4 +92,4 @@ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, Register
 
 }
 
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/JavaScriptCore/assembler/MacroAssemblerARM.h b/JavaScriptCore/assembler/MacroAssemblerARM.h
index 24e2e11..21b8de8 100644
--- a/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -30,7 +30,7 @@
 
 #include <wtf/Platform.h>
 
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
 
 #include "ARMAssembler.h"
 #include "AbstractMacroAssembler.h"
@@ -224,7 +224,7 @@ public:
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
 
-#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT
+#if CPU(ARMV5_OR_LOWER)
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
 #else
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
@@ -928,6 +928,6 @@ private:
 
 }
 
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
 
 #endif // MacroAssemblerARM_h
diff --git a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 3681af8..cae8bf6 100644
--- a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -37,7 +37,7 @@
 
 // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
 // instruction address on the platform (for example, check any alignment requirements).
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
 // ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
 // into the processor are decorated with the bottom bit set, indicating that this is
 // thumb code (as oposed to 32-bit traditional ARM).  The first test checks for both
@@ -130,7 +130,7 @@ public:
    }
 
    explicit MacroAssemblerCodePtr(void* value)
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
        // Decorate the pointer as a thumb code pointer.
        : m_value(reinterpret_cast<char*>(value) + 1)
 #else
@@ -147,7 +147,7 @@ public:
    }
 
    void* executableAddress() const { return m_value; }
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
    // To use this pointer as a data address remove the decoration.
    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
 #else
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86.h b/JavaScriptCore/assembler/MacroAssemblerX86.h
index 6e96240..ca7c31a 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -28,7 +28,7 @@
 
 #include <wtf/Platform.h>
 
-#if ENABLE(ASSEMBLER) && PLATFORM(X86)
+#if ENABLE(ASSEMBLER) && CPU(X86)
 
 #include "MacroAssemblerX86Common.h"
 
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index 96d21f1..449df86 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -542,7 +542,7 @@ public:
        m_assembler.movl_i32r(imm.m_value, dest);
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is is a full register move; perhaps it would be
@@ -944,8 +944,8 @@ private:
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;
 
-#if PLATFORM(X86)
-#if PLATFORM(MAC)
+#if CPU(X86)
+#if OS(MAC_OS_X)
 
    // All X86 Macs are guaranteed to support at least SSE2,
    static bool isSSE2Present()
@@ -953,7 +953,7 @@ private:
        return true;
    }
 
-#else // PLATFORM(MAC)
+#else // OS(MAC_OS_X)
 
    enum SSE2CheckState {
        NotCheckedSSE2,
@@ -996,8 +996,8 @@ private:
 
    static SSE2CheckState s_sse2CheckState;
 
-#endif // PLATFORM(MAC)
-#elif !defined(NDEBUG) // PLATFORM(X86)
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
 
    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but non debug add this method to keep the asserts above happy.
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 7828cf5..ec93f8c 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -28,7 +28,7 @@
 
 #include <wtf/Platform.h>
 
-#if ENABLE(ASSEMBLER) && PLATFORM(X86_64)
+#if ENABLE(ASSEMBLER) && CPU(X86_64)
 
 #include "MacroAssemblerX86Common.h"
 
diff --git a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h
index cbbaaa5..ab3d05f 100644
--- a/JavaScriptCore/assembler/X86Assembler.h
+++ b/JavaScriptCore/assembler/X86Assembler.h
@@ -28,7 +28,7 @@
 
 #include <wtf/Platform.h>
 
-#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
 
 #include "AssemblerBuffer.h"
 #include <stdint.h>
@@ -50,7 +50,7 @@ namespace X86Registers {
        esi,
        edi,
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        r8,
        r9,
        r10,
@@ -118,12 +118,12 @@ private:
        OP_XOR_GvEv = 0x33,
        OP_CMP_EvGv = 0x39,
        OP_CMP_GvEv = 0x3B,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        PRE_REX = 0x40,
 #endif
        OP_PUSH_EAX = 0x50,
        OP_POP_EAX = 0x58,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        OP_MOVSXD_GvEv = 0x63,
 #endif
        PRE_OPERAND_SIZE = 0x66,
@@ -296,7 +296,7 @@ public:
 
    // Arithmetic operations:
 
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
    void adcl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -346,7 +346,7 @@ public:
        }
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
@@ -423,7 +423,7 @@ public:
        }
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
@@ -509,7 +509,7 @@ public:
        }
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
@@ -575,7 +575,7 @@ public:
        }
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
@@ -641,7 +641,7 @@ public:
        }
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
@@ -689,7 +689,7 @@ public:
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
@@ -789,7 +789,7 @@ public:
        m_formatter.immediate32(imm);
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
@@ -897,7 +897,7 @@ public:
        m_formatter.immediate32(imm);
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
@@ -971,7 +971,7 @@ public:
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
@@ -1001,7 +1001,7 @@ public:
    void movl_mEAX(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
 #else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
@@ -1038,14 +1038,14 @@ public:
    void movl_EAXm(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
 #else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
 #endif
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
@@ -1157,7 +1157,7 @@ public:
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
@@ -1323,7 +1323,7 @@ public:
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }
 
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
    void cvtsi2sd_mr(void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
@@ -1343,7 +1343,7 @@ public:
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
@@ -1369,7 +1369,7 @@ public:
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }
 
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
    void movsd_mr(void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
@@ -1535,7 +1535,7 @@ public:
 
    static void repatchLoadPtrToLEA(void* where)
    {
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
        // Skip over the prefix byte.
        where = reinterpret_cast<char*>(where) + 1;
@@ -1679,7 +1679,7 @@ private:
        memoryModRM(reg, base, index, scale, offset);
    }
 
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
    void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
    {
        m_buffer.ensureSpace(maxInstructionSize);
@@ -1722,7 +1722,7 @@ private:
        memoryModRM(reg, base, index, scale, offset);
    }
 
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
    void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
    {
        m_buffer.ensureSpace(maxInstructionSize);
@@ -1732,7 +1732,7 @@ private:
    }
 #endif
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    // Quad-word-sized operands:
    //
    // Used to format 64-bit operantions, planting a REX.w prefix.
@@ -1891,7 +1891,7 @@ private:
    static const RegisterID noBase = X86Registers::ebp;
    static const RegisterID hasSib = X86Registers::esp;
    static const RegisterID noIndex = X86Registers::esp;
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
    static const RegisterID noBase2 = X86Registers::r13;
    static const RegisterID hasSib2 = X86Registers::r12;
 
@@ -1967,7 +1967,7 @@ private:
    void memoryModRM(int reg, RegisterID base, int offset)
    {
        // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        if ((base == hasSib) || (base == hasSib2)) {
 #else
        if (base == hasSib) {
@@ -1982,7 +1982,7 @@ private:
                m_buffer.putIntUnchecked(offset);
            }
        } else {
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
 #else
            if (!offset && (base != noBase))
@@ -2001,7 +2001,7 @@ private:
    void memoryModRM_disp32(int reg, RegisterID base, int offset)
    {
        // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        if ((base == hasSib) || (base == hasSib2)) {
 #else
        if (base == hasSib) {
@@ -2018,7 +2018,7 @@ private:
    {
        ASSERT(index != noIndex);
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
        if (!offset && (base != noBase) && (base != noBase2))
 #else
        if (!offset && (base != noBase))
@@ -2033,7 +2033,7 @@ private:
        }
    }
 
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
    void memoryModRM(int reg, void* address)
    {
        // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
@@ -2048,6 +2048,6 @@ private:
 
 } // namespace JSC
 
-#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
 
 #endif // X86Assembler_h
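
Note on the ARM patching refactor above: linkBranch() is gone; its constant-pool lookup
moves into the inline getLdrImmAddress()/getLdrImmAddressOnPool() helpers, and pointer
patching goes through patchPointerInternal(). The payoff is in executableCopy(), which
now rewrites a pc-relative load feeding a jump into a direct B instruction whenever the
target is in range. Below is a self-contained sketch of that reachability test; the
helper name and constants are hypothetical stand-ins for JSC's BOFFSET_MIN, BOFFSET_MAX
and DefaultPrefetching.

    #include <cstdint>

    typedef uint32_t ARMWord;

    // On traditional ARM, reading the PC yields the address of the current
    // instruction plus 8 bytes (two words) -- the prefetch distance that the
    // new DefaultPrefetching constant encodes.
    static const int kPrefetchWords = 2;

    // A B instruction holds a signed 24-bit displacement counted in words
    // (assumed here to match JSC's BOFFSET_MIN/BOFFSET_MAX).
    static const int32_t kBOffsetMax = 0x007fffff;
    static const int32_t kBOffsetMin = -0x00800000;

    // Returns true when target is reachable by a direct branch from the
    // instruction at insn, storing the word displacement in diffOut.
    bool branchInRange(const ARMWord* insn, const ARMWord* target, int32_t* diffOut)
    {
        int32_t diff = static_cast<int32_t>(target - (insn + kPrefetchWords));
        *diffOut = diff;
        return diff >= kBOffsetMin && diff <= kBOffsetMax;
    }

The relinkCall() change fits the same scheme: the patchable load sits one ARMWord before
the call's return address, hence the sizeof(ARMWord) adjustment before calling
patchPointerInternal().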