author     Ben Murdoch <benm@google.com>    2009-08-11 17:01:47 +0100
committer  Ben Murdoch <benm@google.com>    2009-08-11 18:21:02 +0100
commit     0bf48ef3be53ddaa52bbead65dfd75bf90e7a2b5 (patch)
tree       2943df35f62d885c89d01063cc528dd73b480fea /JavaScriptCore/assembler
parent     7e7a70bfa49a1122b2597a1e6367d89eb4035eca (diff)
Merge in WebKit r47029.
Diffstat (limited to 'JavaScriptCore/assembler')
-rw-r--r--  JavaScriptCore/assembler/ARMAssembler.cpp                  | 384
-rw-r--r--  JavaScriptCore/assembler/ARMAssembler.h                    | 767
-rw-r--r--  JavaScriptCore/assembler/ARMv7Assembler.h                  |  53
-rw-r--r--  JavaScriptCore/assembler/AbstractMacroAssembler.h          | 623
-rw-r--r--  JavaScriptCore/assembler/AssemblerBuffer.h                 |  17
-rw-r--r--  JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h | 305
-rw-r--r--  JavaScriptCore/assembler/CodeLocation.h                    | 186
-rw-r--r--  JavaScriptCore/assembler/LinkBuffer.h                      | 195
-rw-r--r--  JavaScriptCore/assembler/MacroAssembler.h                  |   6
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerARM.h               | 794
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerARMv7.h             |  21
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerCodeRef.h           |  30
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86.h               |  31
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86Common.h         | 194
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86_64.h            |  39
-rw-r--r--  JavaScriptCore/assembler/RepatchBuffer.h                   | 136
-rw-r--r--  JavaScriptCore/assembler/X86Assembler.h                    | 178
17 files changed, 3339 insertions, 620 deletions
diff --git a/JavaScriptCore/assembler/ARMAssembler.cpp b/JavaScriptCore/assembler/ARMAssembler.cpp
new file mode 100644
index 0000000..69daa16
--- /dev/null
+++ b/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
+ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
+{
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
+
+ if (constPool && (*insn & 0x1))
+ return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
+
+ ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
+ if (*insn & DT_UP)
+ return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
+ else
+ return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
+}
+
+void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to)
+{
+ ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));
+
+ if (!from.m_latePatch) {
+ int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);
+
+ if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
+ *insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
+ ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+ return;
+ }
+ }
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
+}
+
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+ ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
+ ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+ ARMWord index = (*ldr & 0xfff) >> 1;
+
+ ASSERT(diff >= 1);
+ if (diff >= 2 || index > 0) {
+ diff = (diff + index - 2) * sizeof(ARMWord);
+ ASSERT(diff <= 0xfff);
+ *ldr = (*ldr & ~0xfff) | diff;
+ } else
+ *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+ int rol;
+
+ if (imm <= 0xff)
+ return OP2_IMM | imm;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol = 8;
+ }
+ else {
+ imm = (imm << 24) | (imm >> 8);
+ rol = 0;
+ }
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ return OP2_IMM | (imm >> 24) | (rol << 8);
+
+ return 0;
+}
+
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+ // Step 1: Search for a non-immediate part
+ ARMWord mask;
+ ARMWord imm1;
+ ARMWord imm2;
+ int rol;
+
+ mask = 0xff000000;
+ rol = 8;
+ while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = 4 + (rol >> 1);
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3) {
+ // rol 8
+ imm = (imm << 8) | (imm >> 24);
+ mask = 0xff00;
+ rol = 24;
+ while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = (rol >> 1) - 8;
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3)
+ return 0;
+ }
+ break;
+ }
+ }
+
+ ASSERT((imm & 0xff) == 0);
+
+ if ((imm & 0xff000000) == 0) {
+ imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+ imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ } else if (imm & 0xc0000000) {
+ imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ } else {
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+
+ if (positive) {
+ mov_r(reg, imm1);
+ orr_r(reg, reg, imm2);
+ } else {
+ mvn_r(reg, imm1);
+ bic_r(reg, reg, imm2);
+ }
+
+ return 1;
+}
+
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+ ARMWord tmp;
+
+ // Do it with 1 instruction
+ tmp = getOp2(imm);
+ if (tmp)
+ return tmp;
+
+ tmp = getOp2(~imm);
+ if (tmp) {
+ if (invert)
+ return tmp | OP2_INV_IMM;
+ mvn_r(tmpReg, tmp);
+ return tmpReg;
+ }
+
+ // Do it with 2 instructions
+ if (genInt(tmpReg, imm, true))
+ return tmpReg;
+ if (genInt(tmpReg, ~imm, false))
+ return tmpReg;
+
+ ldr_imm(tmpReg, imm);
+ return tmpReg;
+}
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+ ARMWord tmp;
+
+ // Do it with 1 instruction
+ tmp = getOp2(imm);
+ if (tmp) {
+ mov_r(dest, tmp);
+ return;
+ }
+
+ tmp = getOp2(~imm);
+ if (tmp) {
+ mvn_r(dest, tmp);
+ return;
+ }
+
+ // Do it with 2 instructions
+ if (genInt(dest, imm, true))
+ return;
+ if (genInt(dest, ~imm, false))
+ return;
+
+ ldr_imm(dest, imm);
+}
+
+// Memory load/store helpers
+
+void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xfff)
+ dtr_u(isLoad, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ add_r(ARM::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
+ dtr_u(isLoad, srcDst, ARM::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = getImm(offset, ARM::S0);
+ dtr_ur(isLoad, srcDst, base, reg);
+ }
+ } else {
+ offset = -offset;
+ if (offset <= 0xfff)
+ dtr_d(isLoad, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ sub_r(ARM::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
+ dtr_d(isLoad, srcDst, ARM::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = getImm(offset, ARM::S0);
+ dtr_dr(isLoad, srcDst, base, reg);
+ }
+ }
+}
+
+void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ ARMWord op2;
+
+ ASSERT(scale >= 0 && scale <= 3);
+ op2 = lsl(index, scale);
+
+ if (offset >= 0 && offset <= 0xfff) {
+ add_r(ARM::S0, base, op2);
+ dtr_u(isLoad, srcDst, ARM::S0, offset);
+ return;
+ }
+ if (offset <= 0 && offset >= -0xfff) {
+ add_r(ARM::S0, base, op2);
+ dtr_d(isLoad, srcDst, ARM::S0, -offset);
+ return;
+ }
+
+ ldr_un_imm(ARM::S0, offset);
+ add_r(ARM::S0, ARM::S0, op2);
+ dtr_ur(isLoad, srcDst, base, ARM::S0);
+}
+
+void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset & 0x3) {
+ if (offset <= 0x3ff && offset >= 0) {
+ fdtr_u(isLoad, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ add_r(ARM::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
+ fdtr_u(isLoad, srcDst, ARM::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+
+ if (offset <= 0x3ff && offset >= 0) {
+ fdtr_d(isLoad, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ sub_r(ARM::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
+ fdtr_d(isLoad, srcDst, ARM::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+ }
+
+ ldr_un_imm(ARM::S0, offset);
+ add_r(ARM::S0, ARM::S0, base);
+ fdtr_u(isLoad, srcDst, ARM::S0, 0);
+}
+
+void* ARMAssembler::executableCopy(ExecutablePool* allocator)
+{
+ char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));
+
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + *iter);
+ ARMWord* offset = getLdrImmAddress(ldrAddr);
+ if (*offset != 0xffffffff)
+ linkBranch(data, JmpSrc(*iter), data + *offset);
+ }
+
+ return data;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM)
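
For readers unfamiliar with the format getOp2() and genInt() target: an ARM data-processing immediate is an 8-bit value rotated right by twice a 4-bit rotate field, so only values expressible that way fit in a single instruction. The shift cascade above computes the same result as the brute-force reference encoder sketched below (a standalone illustration, not part of the patch; the function names are invented for the sketch).

    #include <cstdint>
    #include <cstdio>

    // Rotate a 32-bit value left by n bits (n in 0..31).
    static uint32_t rotl32(uint32_t v, unsigned n)
    {
        return n ? ((v << n) | (v >> (32 - n))) : v;
    }

    // Return the 12-bit operand-2 encoding ((rot << 8) | imm8) for value,
    // or -1 if no single-instruction encoding exists. The encoded value is
    // imm8 rotated right by 2 * rot, so we search for the rot that rotates
    // the significant bits back into the low byte.
    static int encodeARMImmediate(uint32_t value)
    {
        for (unsigned rot = 0; rot < 16; ++rot) {
            uint32_t imm8 = rotl32(value, 2 * rot);
            if (imm8 <= 0xff)
                return static_cast<int>((rot << 8) | imm8);
        }
        return -1; // caller falls back to mvn, two instructions, or a pool load
    }

    int main()
    {
        printf("%x\n", encodeARMImmediate(0xff000000)); // 4ff (imm8 = 0xff, rot = 4)
        printf("%d\n", encodeARMImmediate(0x12345678)); // -1, not encodable
    }
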
diff --git a/JavaScriptCore/assembler/ARMAssembler.h b/JavaScriptCore/assembler/ARMAssembler.h
new file mode 100644
index 0000000..d3fe782
--- /dev/null
+++ b/JavaScriptCore/assembler/ARMAssembler.h
@@ -0,0 +1,767 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#include "AssemblerBufferWithConstantPool.h"
+#include <wtf/Assertions.h>
+namespace JSC {
+
+typedef uint32_t ARMWord;
+
+namespace ARM {
+ typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ S0 = r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ S1 = r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ sp = r13,
+ r14,
+ lr = r14,
+ r15,
+ pc = r15
+ } RegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ SD0 = d3
+ } FPRegisterID;
+
+} // namespace ARM
+
+ class ARMAssembler {
+ public:
+ typedef ARM::RegisterID RegisterID;
+ typedef ARM::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+ typedef WTF::SegmentedVector<int, 64> Jumps;
+
+ ARMAssembler() { }
+
+ // ARM conditional constants
+ typedef enum {
+ EQ = 0x00000000, // Zero
+ NE = 0x10000000, // Non-zero
+ CS = 0x20000000,
+ CC = 0x30000000,
+ MI = 0x40000000,
+ PL = 0x50000000,
+ VS = 0x60000000,
+ VC = 0x70000000,
+ HI = 0x80000000,
+ LS = 0x90000000,
+ GE = 0xa0000000,
+ LT = 0xb0000000,
+ GT = 0xc0000000,
+ LE = 0xd0000000,
+ AL = 0xe0000000
+ } Condition;
+
+ // ARM instruction constants
+ enum {
+ AND = (0x0 << 21),
+ EOR = (0x1 << 21),
+ SUB = (0x2 << 21),
+ RSB = (0x3 << 21),
+ ADD = (0x4 << 21),
+ ADC = (0x5 << 21),
+ SBC = (0x6 << 21),
+ RSC = (0x7 << 21),
+ TST = (0x8 << 21),
+ TEQ = (0x9 << 21),
+ CMP = (0xa << 21),
+ CMN = (0xb << 21),
+ ORR = (0xc << 21),
+ MOV = (0xd << 21),
+ BIC = (0xe << 21),
+ MVN = (0xf << 21),
+ MUL = 0x00000090,
+ MULL = 0x00c00090,
+ FADDD = 0x0e300b00,
+ FSUBD = 0x0e300b40,
+ FMULD = 0x0e200b00,
+ FCMPD = 0x0eb40b40,
+ DTR = 0x05000000,
+ LDRH = 0x00100090,
+ STRH = 0x00000090,
+ STMDB = 0x09200000,
+ LDMIA = 0x08b00000,
+ FDTR = 0x0d000b00,
+ B = 0x0a000000,
+ BL = 0x0b000000,
+ FMSR = 0x0e000a10,
+ FSITOD = 0x0eb80bc0,
+ FMSTAT = 0x0ef1fa10,
+#if ARM_ARCH_VERSION >= 5
+ CLZ = 0x016f0f10,
+ BKPT = 0xe120070,
+#endif
+ };
+
+ enum {
+ OP2_IMM = (1 << 25),
+ OP2_IMMh = (1 << 22),
+ OP2_INV_IMM = (1 << 26),
+ SET_CC = (1 << 20),
+ OP2_OFSREG = (1 << 25),
+ DT_UP = (1 << 23),
+ DT_WB = (1 << 21),
+ // This flag is included in LDR and STR
+ DT_PRE = (1 << 24),
+ HDT_UH = (1 << 5),
+ DT_LOAD = (1 << 20),
+ };
+
+ // Masks of ARM instructions
+ enum {
+ BRANCH_MASK = 0x00ffffff,
+ NONARM = 0xf0000000,
+ SDT_MASK = 0x0c000000,
+ SDT_OFFSET_MASK = 0xfff,
+ };
+
+ enum {
+ BOFFSET_MIN = -0x00800000,
+ BOFFSET_MAX = 0x007fffff,
+ SDT = 0x04000000,
+ };
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0000,
+ padForAlign32 = 0xee120070,
+ };
+
+ class JmpSrc {
+ friend class ARMAssembler;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ , m_latePatch(false)
+ {
+ }
+
+ void enableLatePatch() { m_latePatch = true; }
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ , m_latePatch(false)
+ {
+ }
+
+ int m_offset : 31;
+ int m_latePatch : 1;
+ };
+
+ class JmpDst {
+ friend class ARMAssembler;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ , m_used(false)
+ {
+ }
+
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ , m_used(false)
+ {
+ ASSERT(m_offset == offset);
+ }
+
+ int m_offset : 31;
+ int m_used : 1;
+ };
+
+ // Instruction formatting
+
+ void emitInst(ARMWord op, int rd, int rn, ARMWord op2)
+ {
+ ASSERT(((op2 & ~OP2_IMM) <= 0xfff) || ((op2 & ~OP2_IMMh) <= 0xfff));
+ m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+ }
+
+ void and_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | AND, rd, rn, op2);
+ }
+
+ void ands_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | AND | SET_CC, rd, rn, op2);
+ }
+
+ void eor_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | EOR, rd, rn, op2);
+ }
+
+ void eors_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | EOR | SET_CC, rd, rn, op2);
+ }
+
+ void sub_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SUB, rd, rn, op2);
+ }
+
+ void subs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SUB | SET_CC, rd, rn, op2);
+ }
+
+ void rsb_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSB, rd, rn, op2);
+ }
+
+ void rsbs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSB | SET_CC, rd, rn, op2);
+ }
+
+ void add_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADD, rd, rn, op2);
+ }
+
+ void adds_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADD | SET_CC, rd, rn, op2);
+ }
+
+ void adc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADC, rd, rn, op2);
+ }
+
+ void adcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADC | SET_CC, rd, rn, op2);
+ }
+
+ void sbc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SBC, rd, rn, op2);
+ }
+
+ void sbcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SBC | SET_CC, rd, rn, op2);
+ }
+
+ void rsc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSC, rd, rn, op2);
+ }
+
+ void rscs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSC | SET_CC, rd, rn, op2);
+ }
+
+ void tst_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | TST | SET_CC, 0, rn, op2);
+ }
+
+ void teq_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | TEQ | SET_CC, 0, rn, op2);
+ }
+
+ void cmp_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | CMP | SET_CC, 0, rn, op2);
+ }
+
+ void orr_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ORR, rd, rn, op2);
+ }
+
+ void orrs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ORR | SET_CC, rd, rn, op2);
+ }
+
+ void mov_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARM::r0, op2);
+ }
+
+ void movs_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARM::r0, op2);
+ }
+
+ void bic_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | BIC, rd, rn, op2);
+ }
+
+ void bics_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | BIC | SET_CC, rd, rn, op2);
+ }
+
+ void mvn_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MVN, rd, ARM::r0, op2);
+ }
+
+ void mvns_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MVN | SET_CC, rd, ARM::r0, op2);
+ }
+
+ void mul_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void muls_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | SET_CC | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void mull_r(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+ }
+
+ void faddd_r(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FADDD, dd, dn, dm);
+ }
+
+ void fsubd_r(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FSUBD, dd, dn, dm);
+ }
+
+ void fmuld_r(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FMULD, dd, dn, dm);
+ }
+
+ void fcmpd_r(int dd, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FCMPD, dd, 0, dm);
+ }
+
+ void ldr_imm(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARM::pc) | RD(rd), imm, true);
+ }
+
+ void ldr_un_imm(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARM::pc) | RD(rd), imm);
+ }
+
+ void dtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP, rd, rb, op2);
+ }
+
+ void dtr_ur(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP | OP2_OFSREG, rd, rb, rm);
+ }
+
+ void dtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+ }
+
+ void dtr_dr(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | OP2_OFSREG, rd, rb, rm);
+ }
+
+ void ldrh_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm);
+ }
+
+ void ldrh_d(int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_PRE, rd, rb, op2);
+ }
+
+ void ldrh_u(int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rb, op2);
+ }
+
+ void strh_r(int rn, int rm, int rd, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | STRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm);
+ }
+
+ void fdtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff);
+ emitInst(static_cast<ARMWord>(cc) | FDTR | DT_UP | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+ }
+
+ void fdtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff);
+ emitInst(static_cast<ARMWord>(cc) | FDTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+ }
+
+ void push_r(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(cc | DTR | DT_WB | RN(ARM::sp) | RD(reg) | 0x4);
+ }
+
+ void pop_r(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARM::sp) | RD(reg) | 0x4);
+ }
+
+ inline void poke_r(int reg, Condition cc = AL)
+ {
+ dtr_d(false, ARM::sp, 0, reg, cc);
+ }
+
+ inline void peek_r(int reg, Condition cc = AL)
+ {
+ dtr_u(true, reg, ARM::sp, 0, cc);
+ }
+
+ void fmsr_r(int dd, int rn, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FMSR, rn, dd, 0);
+ }
+
+ void fsitod_r(int dd, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FSITOD, dd, 0, dm);
+ }
+
+ void fmstat(Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | FMSTAT);
+ }
+
+#if ARM_ARCH_VERSION >= 5
+ void clz_r(int rd, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm));
+ }
+#endif
+
+ void bkpt(ARMWord value)
+ {
+#if ARM_ARCH_VERSION >= 5
+ m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
+#else
+ // Cannot access memory address zero; the faulting load stands in for bkpt
+ dtr_dr(true, ARM::S0, ARM::S0, ARM::S0);
+#endif
+ }
+
+ static ARMWord lsl(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x00;
+ }
+
+ static ARMWord lsr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x20;
+ }
+
+ static ARMWord asr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x40;
+ }
+
+ static ARMWord lsl_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(shiftReg <= ARM::pc);
+ return reg | (shiftReg << 8) | 0x10;
+ }
+
+ static ARMWord lsr_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(shiftReg <= ARM::pc);
+ return reg | (shiftReg << 8) | 0x30;
+ }
+
+ static ARMWord asr_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(shiftReg <= ARM::pc);
+ return reg | (shiftReg << 8) | 0x50;
+ }
+
+ // General helpers
+
+ int size()
+ {
+ return m_buffer.size();
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ JmpDst label()
+ {
+ return JmpDst(m_buffer.size());
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ mov_r(ARM::r0, ARM::r0);
+
+ return label();
+ }
+
+ JmpSrc jmp(Condition cc = AL)
+ {
+ int s = size();
+ ldr_un_imm(ARM::pc, 0xffffffff, cc);
+ m_jumps.append(s);
+ return JmpSrc(s);
+ }
+
+ void* executableCopy(ExecutablePool* allocator);
+
+ // Patching helpers
+
+ static ARMWord* getLdrImmAddress(ARMWord* insn, uint32_t* constPool = 0);
+ static void linkBranch(void* code, JmpSrc from, void* to);
+
+ static void patchPointerInternal(intptr_t from, void* to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
+ }
+
+ static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+ {
+ value = (value << 1) + 1;
+ ASSERT(!(value & ~0xfff));
+ return (load & ~0xfff) | value;
+ }
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // Patch pointers
+
+ static void linkPointer(void* code, JmpDst from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+ }
+
+ static void repatchInt32(void* from, int32_t to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), reinterpret_cast<void*>(to));
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+ }
+
+ static void repatchLoadPtrToLEA(void* from)
+ {
+ // On ARM this patches an LDR into an ADD. It is a restricted conversion,
+ // from special case to special case, although sufficient for its purpose
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ASSERT((*insn & 0x0ff00f00) == 0x05900000);
+
+ *insn = (*insn & 0xf00ff0ff) | 0x02800000;
+ ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+ }
+
+ // Linkers
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(m_buffer.data()) + (from.m_offset / sizeof(ARMWord));
+ *getLdrImmAddress(insn, m_buffer.poolAddress()) = static_cast<ARMWord>(to.m_offset);
+ }
+
+ static void linkJump(void* code, JmpSrc from, void* to)
+ {
+ linkBranch(code, from, to);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from) - sizeof(ARMWord), to);
+ }
+
+ static void linkCall(void* code, JmpSrc from, void* to)
+ {
+ linkBranch(code, from, to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ relinkJump(from, to);
+ }
+
+ // Address operations
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<ARMWord*>(code) + jump.m_offset / sizeof(ARMWord) + 1);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<ARMWord*>(code) + label.m_offset / sizeof(ARMWord));
+ }
+
+ // Address differences
+
+ static int getDifferenceBetweenLabels(JmpDst from, JmpSrc to)
+ {
+ return (to.m_offset + sizeof(ARMWord)) - from.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst from, JmpDst to)
+ {
+ return to.m_offset - from.m_offset;
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ return call.m_offset + sizeof(ARMWord);
+ }
+
+ // Handle immediates
+
+ static ARMWord getOp2Byte(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return OP2_IMMh | (imm & 0x0f) | ((imm & 0xf0) << 4);
+ }
+
+ static ARMWord getOp2(ARMWord imm);
+ ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+ void moveImm(ARMWord imm, int dest);
+
+ // Memory load/store helpers
+
+ void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset);
+
+ // Constant pool handlers
+
+ static ARMWord placeConstantPoolBarrier(int offset)
+ {
+ offset = (offset - sizeof(ARMWord)) >> 2;
+ ASSERT((offset <= BOFFSET_MAX && offset >= BOFFSET_MIN));
+ return AL | B | (offset & BRANCH_MASK);
+ }
+
+ private:
+ ARMWord RM(int reg)
+ {
+ ASSERT(reg <= ARM::pc);
+ return reg;
+ }
+
+ ARMWord RS(int reg)
+ {
+ ASSERT(reg <= ARM::pc);
+ return reg << 8;
+ }
+
+ ARMWord RD(int reg)
+ {
+ ASSERT(reg <= ARM::pc);
+ return reg << 12;
+ }
+
+ ARMWord RN(int reg)
+ {
+ ASSERT(reg <= ARM::pc);
+ return reg << 16;
+ }
+
+ static ARMWord getConditionalField(ARMWord i)
+ {
+ return i & 0xf0000000;
+ }
+
+ int genInt(int reg, ARMWord imm, bool positive);
+
+ ARMBuffer m_buffer;
+ Jumps m_jumps;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#endif // ARMAssembler_h
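
To make the field packers above concrete: emitInst() simply ORs the condition, opcode, register fields, and operand 2 into one 32-bit word. A minimal standalone sketch (constants copied from the header; otherwise illustrative) assembling add r0, r1, #4:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t ARMWord;

    const ARMWord AL      = 0xe0000000; // "always" condition field
    const ARMWord ADD     = 0x4 << 21;  // data-processing opcode
    const ARMWord OP2_IMM = 1 << 25;    // operand 2 is an immediate

    ARMWord RD(int reg) { return static_cast<ARMWord>(reg) << 12; }
    ARMWord RN(int reg) { return static_cast<ARMWord>(reg) << 16; }

    int main()
    {
        // The same word emitInst(AL | ADD, 0, 1, OP2_IMM | 4) would append.
        ARMWord insn = AL | ADD | RN(1) | RD(0) | OP2_IMM | 4;
        assert(insn == 0xe2810004); // add r0, r1, #4 per the ARM ARM
        printf("%08x\n", insn);
        return 0;
    }
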
diff --git a/JavaScriptCore/assembler/ARMv7Assembler.h b/JavaScriptCore/assembler/ARMv7Assembler.h
index c9cb87e..f7e2fb4 100644
--- a/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,7 +28,7 @@
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_V7)
+#if ENABLE(ASSEMBLER) && PLATFORM_ARM_ARCH(7)
#include "AssemblerBuffer.h"
#include <wtf/Assertions.h>
@@ -442,6 +442,7 @@ public:
{
}
+ void enableLatePatch() { }
private:
JmpSrc(int offset)
: m_offset(offset)
@@ -898,7 +899,7 @@ public:
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
}
- // What is wrong with you people?, xor is not spelled with an 'e'. :-(
+ // xor is not spelled with an 'e'. :-(
void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT(!BadReg(rd));
@@ -907,7 +908,7 @@ public:
m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
- // What is wrong with you people?, xor is not spelled with an 'e'. :-(
+ // xor is not spelled with an 'e'. :-(
void eor(RegisterID rd, RegisterID rn, RegisterID rm)
{
if ((rd == rn) && !((rd | rm) & 8))
@@ -1520,7 +1521,7 @@ public:
linkWithOffset(location, relative);
}
- // bah, this mathod should really be static, since it is used by the PatchBuffer.
+ // bah, this method should really be static, since it is used by the LinkBuffer.
// return a bool saying whether the link was successful?
static void linkCall(void* code, JmpSrc from, void* to)
{
@@ -1528,51 +1529,51 @@ public:
ASSERT(from.m_offset != -1);
ASSERT(reinterpret_cast<intptr_t>(to) & 1);
- patchPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
+ setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
}
- static void patchPointer(void* code, JmpDst where, void* value)
+ static void linkPointer(void* code, JmpDst where, void* value)
{
- patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
}
static void relinkJump(void* from, void* to)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
-
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
linkWithOffset(reinterpret_cast<uint16_t*>(from), relative);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
}
static void relinkCall(void* from, void* to)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
-
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
ASSERT(reinterpret_cast<intptr_t>(to) & 1);
- patchPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
+ setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
}
static void repatchInt32(void* where, int32_t value)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
-
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
- patchInt32(where, value);
+ setInt32(where, value);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
}
static void repatchPointer(void* where, void* value)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
-
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
- patchPointer(where, value);
+ setPointer(where, value);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
}
static void repatchLoadPtrToLEA(void* where)
@@ -1582,8 +1583,8 @@ public:
uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
- ExecutableAllocator::MakeWritable unprotect(loadOp, sizeof(uint16_t));
*loadOp = OP_ADD_reg_T3 | (*loadOp & 0xf);
+ ExecutableAllocator::cacheFlush(loadOp, sizeof(uint16_t));
}
private:
@@ -1610,12 +1611,10 @@ private:
m_formatter.vfpOp(0x0b00ed00 | offset | (up << 7) | (isLoad << 4) | doubleRegisterMask(rd, 6, 28) | rn);
}
- static void patchInt32(void* code, uint32_t value)
+ static void setInt32(void* code, uint32_t value)
{
uint16_t* location = reinterpret_cast<uint16_t*>(code);
- ExecutableAllocator::MakeWritable unprotect(location - 4, 4 * sizeof(uint16_t));
-
uint16_t lo16 = value;
uint16_t hi16 = value >> 16;
@@ -1623,11 +1622,13 @@ private:
spliceLo11(location - 3, lo16);
spliceHi5(location - 2, hi16);
spliceLo11(location - 1, hi16);
+
+ ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
}
- static void patchPointer(void* code, void* value)
+ static void setPointer(void* code, void* value)
{
- patchInt32(code, reinterpret_cast<uint32_t>(value));
+ setInt32(code, reinterpret_cast<uint32_t>(value));
}
// Linking & patching:
@@ -1753,6 +1754,6 @@ private:
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_V7)
+#endif // ENABLE(ASSEMBLER) && PLATFORM_ARM_ARCH(7)
#endif // ARMAssembler_h
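
The pattern running through this hunk: instead of wrapping each patch site in an ExecutableAllocator::MakeWritable guard, the code now writes first and then calls ExecutableAllocator::cacheFlush over the modified range, so the instruction cache refetches the new bytes. A freestanding sketch of the same write-then-flush idiom, using GCC/Clang's __builtin___clear_cache as a stand-in for cacheFlush (an assumption for illustration; the real call is platform-specific):

    #include <cstdint>
    #include <cstring>

    // Patch one word of (already writable) code, then invalidate the
    // affected instruction-cache lines so the CPU sees the new encoding.
    static void patchWord(void* where, uint32_t value)
    {
        std::memcpy(where, &value, sizeof(value));
        char* p = static_cast<char*>(where);
        __builtin___clear_cache(p, p + sizeof(value));
    }
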
diff --git a/JavaScriptCore/assembler/AbstractMacroAssembler.h b/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 7460029..f927ed2 100644
--- a/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -29,34 +29,26 @@
#include <wtf/Platform.h>
#include <MacroAssemblerCodeRef.h>
+#include <CodeLocation.h>
#include <wtf/Noncopyable.h>
#include <wtf/UnusedParam.h>
#if ENABLE(ASSEMBLER)
-// FIXME: keep transitioning this out into MacroAssemblerX86_64.
-#if PLATFORM(X86_64)
-#define REPTACH_OFFSET_CALL_R11 3
-#endif
-
namespace JSC {
+class LinkBuffer;
+class RepatchBuffer;
+
template <class AssemblerType>
class AbstractMacroAssembler {
public:
+ typedef AssemblerType AssemblerType_T;
+
typedef MacroAssemblerCodePtr CodePtr;
typedef MacroAssemblerCodeRef CodeRef;
class Jump;
- class PatchBuffer;
- class CodeLocationInstruction;
- class CodeLocationLabel;
- class CodeLocationJump;
- class CodeLocationCall;
- class CodeLocationNearCall;
- class CodeLocationDataLabel32;
- class CodeLocationDataLabelPtr;
- class ProcessorReturnAddress;
typedef typename AssemblerType::RegisterID RegisterID;
typedef typename AssemblerType::FPRegisterID FPRegisterID;
@@ -181,7 +173,7 @@ public:
struct Imm32 {
explicit Imm32(int32_t value)
: m_value(value)
-#if PLATFORM(ARM_V7)
+#if PLATFORM(ARM)
, m_isPointer(false)
#endif
{
@@ -190,7 +182,7 @@ public:
#if !PLATFORM(X86_64)
explicit Imm32(ImmPtr ptr)
: m_value(ptr.asIntptr())
-#if PLATFORM(ARM_V7)
+#if PLATFORM(ARM)
, m_isPointer(true)
#endif
{
@@ -198,7 +190,7 @@ public:
#endif
int32_t m_value;
-#if PLATFORM(ARM_V7)
+#if PLATFORM(ARM)
// We rely on being able to regenerate code to recover exception handling
// information. Since ARMv7 supports 16-bit immediates there is a danger
// that if pointer values change the layout of the generated code will change.
@@ -227,7 +219,7 @@ public:
friend class AbstractMacroAssembler;
friend class Jump;
friend class MacroAssemblerCodeRef;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
Label()
@@ -252,7 +244,7 @@ public:
class DataLabelPtr {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
DataLabelPtr()
{
@@ -274,7 +266,7 @@ public:
class DataLabel32 {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
DataLabel32()
{
@@ -298,7 +290,7 @@ public:
class Call {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class PatchBuffer;
+
public:
enum Flags {
None = 0x0,
@@ -328,8 +320,13 @@ public:
return Call(jump.m_jmp, Linkable);
}
- private:
+ void enableLatePatch()
+ {
+ m_jmp.enableLatePatch();
+ }
+
JmpSrc m_jmp;
+ private:
Flags m_flags;
};
@@ -343,7 +340,7 @@ public:
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class Call;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
Jump()
{
@@ -364,6 +361,11 @@ public:
masm->m_assembler.linkJump(m_jmp, label.m_label);
}
+ void enableLatePatch()
+ {
+ m_jmp.enableLatePatch();
+ }
+
private:
JmpSrc m_jmp;
};
@@ -373,9 +375,11 @@ public:
// A JumpList is a set of Jump objects.
// All jumps in the set will be linked to the same destination.
class JumpList {
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
+ typedef Vector<Jump, 16> JumpVector;
+
void link(AbstractMacroAssembler<AssemblerType>* masm)
{
size_t size = m_jumps.size();
@@ -406,504 +410,21 @@ public:
{
return !m_jumps.size();
}
-
- private:
- Vector<Jump, 16> m_jumps;
- };
-
-
- // Section 3: MacroAssembler JIT instruction stream handles.
- //
- // The MacroAssembler supported facilities to modify a JIT generated
- // instruction stream after it has been generated (relinking calls and
- // jumps, and repatching data values). The following types are used
- // to store handles into the underlying instruction stream, the type
- // providing semantic information as to what it is that is in the
- // instruction stream at this point, and thus what operations may be
- // performed on it.
-
-
- // CodeLocationCommon:
- //
- // Base type for other CodeLocation* types. A postion in the JIT genertaed
- // instruction stream, without any semantic information.
- class CodeLocationCommon {
- public:
- CodeLocationCommon()
- {
- }
-
- // In order to avoid the need to store multiple handles into the
- // instructions stream, where the code generation is deterministic
- // and the labels will always be a fixed distance apart, these
- // methods may be used to recover a handle that has nopw been
- // retained, based on a known fixed relative offset from one that has.
- CodeLocationInstruction instructionAtOffset(int offset);
- CodeLocationLabel labelAtOffset(int offset);
- CodeLocationJump jumpAtOffset(int offset);
- CodeLocationCall callAtOffset(int offset);
- CodeLocationNearCall nearCallAtOffset(int offset);
- CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
- CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
-
- protected:
- explicit CodeLocationCommon(CodePtr location)
- : m_location(location)
- {
- }
-
- void* dataLocation() { return m_location.dataLocation(); }
- void* executableAddress() { return m_location.executableAddress(); }
-
- void reset()
- {
- m_location = CodePtr();
- }
-
- private:
- CodePtr m_location;
- };
-
- // CodeLocationInstruction:
- //
- // An arbitrary instruction in the JIT code.
- class CodeLocationInstruction : public CodeLocationCommon {
- friend class CodeLocationCommon;
- public:
- CodeLocationInstruction()
- {
- }
-
- void repatchLoadPtrToLEA()
- {
- AssemblerType::repatchLoadPtrToLEA(this->dataLocation());
- }
-
- private:
- explicit CodeLocationInstruction(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationLabel:
- //
- // A point in the JIT code maked with a label.
- class CodeLocationLabel : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class CodeLocationJump;
- friend class CodeLocationCall;
- friend class CodeLocationNearCall;
- friend class PatchBuffer;
- friend class ProcessorReturnAddress;
-
- public:
- CodeLocationLabel()
- {
- }
-
- void* addressForSwitch() { return this->executableAddress(); }
- void* addressForExceptionHandler() { return this->executableAddress(); }
- void* addressForJSR() { return this->executableAddress(); }
-
- bool operator!()
- {
- return !this->executableAddress();
- }
-
- void reset()
- {
- CodeLocationCommon::reset();
- }
-
- private:
- explicit CodeLocationLabel(CodePtr location)
- : CodeLocationCommon(location)
- {
- }
-
- explicit CodeLocationLabel(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
-
- void* getJumpDestination() { return this->executableAddress(); }
- };
-
- // CodeLocationJump:
- //
- // A point in the JIT code at which there is a jump instruction.
- class CodeLocationJump : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- public:
- CodeLocationJump()
- {
- }
-
- void relink(CodeLocationLabel destination)
- {
- AssemblerType::relinkJump(this->dataLocation(), destination.dataLocation());
- }
-
- private:
- explicit CodeLocationJump(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationCall:
- //
- // A point in the JIT code at which there is a call instruction.
- class CodeLocationCall : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- friend class ProcessorReturnAddress;
- public:
- CodeLocationCall()
- {
- }
-
- void relink(CodeLocationLabel destination)
- {
-#if PLATFORM(X86_64)
- CodeLocationCommon::dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).repatch(destination.executableAddress());
-#else
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
-#endif
- }
-
- void relink(FunctionPtr destination)
- {
-#if PLATFORM(X86_64)
- CodeLocationCommon::dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).repatch(destination.executableAddress());
-#else
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
-#endif
- }
-
- // This methods returns the value that will be set as the return address
- // within a function that has been called from this call instruction.
- void* calleeReturnAddressValue()
- {
- return this->executableAddress();
- }
-
- private:
- explicit CodeLocationCall(CodePtr location)
- : CodeLocationCommon(location)
- {
- }
-
- explicit CodeLocationCall(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationNearCall:
- //
- // A point in the JIT code at which there is a call instruction with near linkage.
- class CodeLocationNearCall : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- friend class ProcessorReturnAddress;
- public:
- CodeLocationNearCall()
- {
- }
-
- void relink(CodePtr destination)
- {
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
- }
-
- void relink(CodeLocationLabel destination)
- {
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
- }
-
- void relink(FunctionPtr destination)
- {
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
- }
-
- // This methods returns the value that will be set as the return address
- // within a function that has been called from this call instruction.
- void* calleeReturnAddressValue()
- {
- return this->executableAddress();
- }
-
- private:
- explicit CodeLocationNearCall(CodePtr location)
- : CodeLocationCommon(location)
- {
- }
-
- explicit CodeLocationNearCall(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationDataLabel32:
- //
- // A point in the JIT code at which there is an int32_t immediate that may be repatched.
- class CodeLocationDataLabel32 : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- public:
- CodeLocationDataLabel32()
- {
- }
-
- void repatch(int32_t value)
- {
- AssemblerType::repatchInt32(this->dataLocation(), value);
- }
-
- private:
- explicit CodeLocationDataLabel32(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationDataLabelPtr:
- //
- // A point in the JIT code at which there is a void* immediate that may be repatched.
- class CodeLocationDataLabelPtr : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- public:
- CodeLocationDataLabelPtr()
- {
- }
-
- void repatch(void* value)
- {
- AssemblerType::repatchPointer(this->dataLocation(), value);
- }
-
- private:
- explicit CodeLocationDataLabelPtr(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // ProcessorReturnAddress:
- //
- // This class can be used to relink a call identified by its return address.
- class ProcessorReturnAddress {
- friend class CodeLocationCall;
- friend class CodeLocationNearCall;
- public:
- ProcessorReturnAddress(void* location)
- : m_location(location)
- {
- }
-
- void relinkCallerToTrampoline(CodeLocationLabel label)
- {
- CodeLocationCall(CodePtr(m_location)).relink(label);
- }
-
- void relinkCallerToTrampoline(CodePtr newCalleeFunction)
- {
- relinkCallerToTrampoline(CodeLocationLabel(newCalleeFunction));
- }
-
- void relinkCallerToFunction(FunctionPtr function)
- {
- CodeLocationCall(CodePtr(m_location)).relink(function);
- }
-
- void relinkNearCallerToTrampoline(CodeLocationLabel label)
- {
- CodeLocationNearCall(CodePtr(m_location)).relink(label);
- }
- void relinkNearCallerToTrampoline(CodePtr newCalleeFunction)
- {
- relinkNearCallerToTrampoline(CodeLocationLabel(newCalleeFunction));
- }
-
- void* addressForLookup()
- {
- return m_location.value();
- }
+ const JumpVector& jumps() { return m_jumps; }
private:
- ReturnAddressPtr m_location;
+ JumpVector m_jumps;
};
- // Section 4: PatchBuffer - utility to finalize code generation.
+ // Section 3: Misc admin methods
static CodePtr trampolineAt(CodeRef ref, Label label)
{
return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
}
- // PatchBuffer:
- //
- // This class assists in linking code generated by the macro assembler, once code generation
- // has been completed, and the code has been copied to is final location in memory. At this
- // time pointers to labels within the code may be resolved, and relative offsets to external
- // addresses may be fixed.
- //
- // Specifically:
- // * Jump objects may be linked to external targets,
- // * The address of Jump objects may taken, such that it can later be relinked.
- // * The return address of a Jump object representing a call may be acquired.
- // * The address of a Label pointing into the code may be resolved.
- // * The value referenced by a DataLabel may be fixed.
- //
- // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
- // address of calls, as opposed to a point that can be used to later relink a Jump -
- // possibly wrap the later up in an object that can do just that).
- class PatchBuffer : public Noncopyable {
- public:
- // Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
- // First, executablePool is copied into m_executablePool, then the initialization of
- // m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
- PatchBuffer(AbstractMacroAssembler<AssemblerType>* masm, PassRefPtr<ExecutablePool> executablePool)
- : m_executablePool(executablePool)
- , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
- , m_size(masm->m_assembler.size())
-#ifndef NDEBUG
- , m_completed(false)
-#endif
- {
- }
-
- ~PatchBuffer()
- {
- ASSERT(m_completed);
- }
-
- // These methods are used to link or set values at code generation time.
-
- void link(Call call, FunctionPtr function)
- {
- ASSERT(call.isFlagSet(Call::Linkable));
-#if PLATFORM(X86_64)
- if (!call.isFlagSet(Call::Near)) {
- char* callLocation = reinterpret_cast<char*>(AssemblerType::getRelocatedAddress(code(), call.m_jmp)) - REPTACH_OFFSET_CALL_R11;
- AssemblerType::patchPointerForCall(callLocation, function.value());
- } else
-#endif
- AssemblerType::linkCall(code(), call.m_jmp, function.value());
- }
-
- void link(Jump jump, CodeLocationLabel label)
- {
- AssemblerType::linkJump(code(), jump.m_jmp, label.dataLocation());
- }
-
- void link(JumpList list, CodeLocationLabel label)
- {
- for (unsigned i = 0; i < list.m_jumps.size(); ++i)
- AssemblerType::linkJump(code(), list.m_jumps[i].m_jmp, label.dataLocation());
- }
-
- void patch(DataLabelPtr label, void* value)
- {
- AssemblerType::patchPointer(code(), label.m_label, value);
- }
-
- void patch(DataLabelPtr label, CodeLocationLabel value)
- {
- AssemblerType::patchPointer(code(), label.m_label, value.getJumpDestination());
- }
-
- // These methods are used to obtain handles to allow the code to be relinked / repatched later.
-
- CodeLocationCall locationOf(Call call)
- {
- ASSERT(call.isFlagSet(Call::Linkable));
- ASSERT(!call.isFlagSet(Call::Near));
- return CodeLocationCall(AssemblerType::getRelocatedAddress(code(), call.m_jmp));
- }
-
- CodeLocationNearCall locationOfNearCall(Call call)
- {
- ASSERT(call.isFlagSet(Call::Linkable));
- ASSERT(call.isFlagSet(Call::Near));
- return CodeLocationNearCall(AssemblerType::getRelocatedAddress(code(), call.m_jmp));
- }
-
- CodeLocationLabel locationOf(Label label)
- {
- return CodeLocationLabel(AssemblerType::getRelocatedAddress(code(), label.m_label));
- }
-
- CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
- {
- return CodeLocationDataLabelPtr(AssemblerType::getRelocatedAddress(code(), label.m_label));
- }
-
- CodeLocationDataLabel32 locationOf(DataLabel32 label)
- {
- return CodeLocationDataLabel32(AssemblerType::getRelocatedAddress(code(), label.m_label));
- }
-
- // This method obtains the return address of the call, given as an offset from
- // the start of the code.
- unsigned returnAddressOffset(Call call)
- {
- return AssemblerType::getCallReturnOffset(call.m_jmp);
- }
-
- // Upon completion of all patching either 'finalizeCode()' or 'finalizeCodeAddendum()' should be called
- // once to complete generation of the code. 'finalizeCode()' is suited to situations
- // where the executable pool must also be retained, the lighter-weight 'finalizeCodeAddendum()' is
- // suited to adding to an existing allocation.
- CodeRef finalizeCode()
- {
- performFinalization();
-
- return CodeRef(m_code, m_executablePool, m_size);
- }
- CodeLocationLabel finalizeCodeAddendum()
- {
- performFinalization();
-
- return CodeLocationLabel(code());
- }
-
- private:
- // Keep this private! - the underlying code should only be obtained externally via
- // finalizeCode() or finalizeCodeAddendum().
- void* code()
- {
- return m_code;
- }
-
- void performFinalization()
- {
-#ifndef NDEBUG
- ASSERT(!m_completed);
- m_completed = true;
-#endif
-
- ExecutableAllocator::makeExecutable(code(), m_size);
- }
-
- RefPtr<ExecutablePool> m_executablePool;
- void* m_code;
- size_t m_size;
-#ifndef NDEBUG
- bool m_completed;
-#endif
- };
-
-
- // Section 5: Misc admin methods
-
size_t size()
{
return m_assembler.size();
@@ -962,50 +483,60 @@ public:
protected:
AssemblerType m_assembler;
-};
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationInstruction AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::instructionAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
-}
+ static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+ {
+ AssemblerType::linkJump(code, jump.m_jmp, target.dataLocation());
+ }
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationLabel AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::labelAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
-}
+ static void linkPointer(void* code, typename AssemblerType::JmpDst label, void* value)
+ {
+ AssemblerType::linkPointer(code, label, value);
+ }
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationJump AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::jumpAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
-}
+ static void* getLinkerAddress(void* code, typename AssemblerType::JmpSrc label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationCall AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::callAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
-}
+ static void* getLinkerAddress(void* code, typename AssemblerType::JmpDst label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationNearCall AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::nearCallAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
-}
+ static unsigned getLinkerCallReturnOffset(Call call)
+ {
+ return AssemblerType::getCallReturnOffset(call.m_jmp);
+ }
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabelPtr AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabelPtrAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
-}
+ static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+ }
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabel32 AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabel32AtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
-}
+ static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+ }
+
+ static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+ }
+
+ static void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+ {
+ AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
+ }
+};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/AssemblerBuffer.h b/JavaScriptCore/assembler/AssemblerBuffer.h
index 7a5a8d3..073906a 100644
--- a/JavaScriptCore/assembler/AssemblerBuffer.h
+++ b/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -95,12 +95,14 @@ namespace JSC {
void putIntUnchecked(int value)
{
+ ASSERT(!(m_size > m_capacity - 4));
*reinterpret_cast<int*>(&m_buffer[m_size]) = value;
m_size += 4;
}
void putInt64Unchecked(int64_t value)
{
+ ASSERT(!(m_size > m_capacity - 8));
*reinterpret_cast<int64_t*>(&m_buffer[m_size]) = value;
m_size += 8;
}
@@ -137,10 +139,19 @@ namespace JSC {
return memcpy(result, m_buffer, m_size);
}
- private:
- void grow()
+ protected:
+ void append(const char* data, int size)
+ {
+ if (m_size > m_capacity - size)
+ grow(size);
+
+ memcpy(m_buffer + m_size, data, size);
+ m_size += size;
+ }
+
+ void grow(int extraCapacity = 0)
{
- m_capacity += m_capacity / 2;
+ m_capacity += m_capacity / 2 + extraCapacity;
if (m_buffer == m_inlineBuffer) {
char* newBuffer = static_cast<char*>(fastMalloc(m_capacity));
diff --git a/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
new file mode 100644
index 0000000..f15b7f3
--- /dev/null
+++ b/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBufferWithConstantPool_h
+#define AssemblerBufferWithConstantPool_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+/*
+ A constant pool can hold 4- or 8-byte data items. The values can be
+ constants or addresses. The addresses should be 32 or 64 bits wide. The
+ constants should be double-precision floating point or integer values
+ which are hard to encode in a few machine instructions.
+
+ TODO: The pool is designed to handle both 32- and 64-bit values, but
+ currently only 4-byte constants are implemented and tested.
+
+ The AssemblerBuffer can contain multiple constant pools. Each pool is
+ inserted into the instruction stream and protected from the execution
+ flow by a jump instruction.
+
+ The flush mechanism is invoked when there is no room left for the next
+ instruction or constant. Three values determine when the constant pool
+ itself has to be inserted into the instruction stream (AssemblerBuffer):
+
+ - maxPoolSize: size of the constant pool in bytes; this value cannot be
+ larger than the maximum offset of a PC relative memory load
+
+ - barrierSize: size in bytes of the jump instruction which protects the
+ constant pool from execution
+
+ - maxInstructionSize: maximum length of a machine instruction in bytes
+
+ A few callbacks handle the target architecture specific parts of
+ address handling:
+
+ - TYPE patchConstantPoolLoad(TYPE load, int value):
+ patch the 'load' instruction with the index of the constant in the
+ constant pool and return the patched instruction.
+
+ - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+ patch the PC relative load instruction at address 'loadAddr' with the
+ final relative offset. The offset can be computed with the help of
+ 'constPoolAddr' (the address of the constant pool) and the index of the
+ constant (stored previously in the load instruction itself).
+
+ - TYPE placeConstantPoolBarrier(int size):
+ return a constant pool barrier instruction, which jumps over the
+ constant pool.
+
+ The 'put*WithConstant*' functions should be used to place data into the
+ constant pool.
+*/
+
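+/*
+ A minimal usage sketch (illustrative only; 'MyAssembler' and 'ldrOpcode'
+ are hypothetical stand-ins for a backend that supplies the callbacks
+ described above):
+
+ typedef AssemblerBufferWithConstantPool<4096, 4, 4, MyAssembler> Buffer;
+
+ Buffer buffer;
+ // Emit a PC relative load; the constant goes into the pool and the
+ // load is tagged with its pool index via patchConstantPoolLoad().
+ buffer.putIntWithConstantInt(ldrOpcode, 0x12345678);
+ // A reusable constant may be shared by several loads.
+ buffer.putIntWithConstantInt(ldrOpcode, 0x12345678, true);
+ // The pool is flushed automatically once the next instruction would
+ // push it out of the load's addressing range.
+*/
+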
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool: public AssemblerBuffer {
+ typedef WTF::SegmentedVector<uint32_t, 512> LoadOffsets;
+public:
+ enum {
+ UniqueConst,
+ ReusableConst,
+ UnusedEntry,
+ };
+
+ AssemblerBufferWithConstantPool()
+ : AssemblerBuffer()
+ , m_numConsts(0)
+ , m_maxDistance(maxPoolSize)
+ , m_lastConstDelta(0)
+ {
+ m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+ m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+ }
+
+ ~AssemblerBufferWithConstantPool()
+ {
+ fastFree(m_mask);
+ fastFree(m_pool);
+ }
+
+ void ensureSpace(int space)
+ {
+ flushIfNoSpaceFor(space);
+ AssemblerBuffer::ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ flushIfNoSpaceFor(insnSpace, constSpace);
+ AssemblerBuffer::ensureSpace(insnSpace);
+ }
+
+ bool isAligned(int alignment)
+ {
+ flushIfNoSpaceFor(alignment);
+ return AssemblerBuffer::isAligned(alignment);
+ }
+
+ void putByteUnchecked(int value)
+ {
+ AssemblerBuffer::putByteUnchecked(value);
+ correctDeltas(1);
+ }
+
+ void putByte(int value)
+ {
+ flushIfNoSpaceFor(1);
+ AssemblerBuffer::putByte(value);
+ correctDeltas(1);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ AssemblerBuffer::putShortUnchecked(value);
+ correctDeltas(2);
+ }
+
+ void putShort(int value)
+ {
+ flushIfNoSpaceFor(2);
+ AssemblerBuffer::putShort(value);
+ correctDeltas(2);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ AssemblerBuffer::putIntUnchecked(value);
+ correctDeltas(4);
+ }
+
+ void putInt(int value)
+ {
+ flushIfNoSpaceFor(4);
+ AssemblerBuffer::putInt(value);
+ correctDeltas(4);
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ AssemblerBuffer::putInt64Unchecked(value);
+ correctDeltas(8);
+ }
+
+ int size()
+ {
+ flushIfNoSpaceFor(maxInstructionSize, sizeof(uint64_t));
+ return AssemblerBuffer::size();
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ flushConstantPool(false);
+ return AssemblerBuffer::executableCopy(allocator);
+ }
+
+ void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+ {
+ flushIfNoSpaceFor(4, 4);
+
+ m_loadOffsets.append(AssemblerBuffer::size());
+ if (isReusable)
+ for (int i = 0; i < m_numConsts; ++i) {
+ if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+ AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, i));
+ correctDeltas(4);
+ return;
+ }
+ }
+
+ m_pool[m_numConsts] = constant;
+ m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+ AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, m_numConsts));
+ ++m_numConsts;
+
+ correctDeltas(4, 4);
+ }
+
+ // This flushing mechanism can be called after any unconditional jumps.
+ void flushWithoutBarrier()
+ {
+ // Flush if constant pool is more than 60% full to avoid overuse of this function.
+ if (5 * m_numConsts > 3 * maxPoolSize / sizeof(uint32_t))
+ flushConstantPool(false);
+ }
+
+ uint32_t* poolAddress()
+ {
+ return m_pool;
+ }
+
+private:
+ void correctDeltas(int insnSize)
+ {
+ m_maxDistance -= insnSize;
+ m_lastConstDelta -= insnSize;
+ if (m_lastConstDelta < 0)
+ m_lastConstDelta = 0;
+ }
+
+ void correctDeltas(int insnSize, int constSize)
+ {
+ correctDeltas(insnSize);
+
+ m_maxDistance -= m_lastConstDelta;
+ m_lastConstDelta = constSize;
+ }
+
+ void flushConstantPool(bool useBarrier = true)
+ {
+ if (m_numConsts == 0)
+ return;
+ int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+ if (alignPool)
+ alignPool = sizeof(uint64_t) - alignPool;
+
+ // Callback to protect the constant pool from execution
+ if (useBarrier)
+ AssemblerBuffer::putInt(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+ if (alignPool) {
+ if (alignPool & 1)
+ AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+ if (alignPool & 2)
+ AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+ if (alignPool & 4)
+ AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+ }
+
+ int constPoolOffset = AssemblerBuffer::size();
+ append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
+
+ // Patch each PC relative load
+ for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+ void* loadAddr = reinterpret_cast<void*>(m_buffer + *iter);
+ AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<void*>(m_buffer + constPoolOffset));
+ }
+
+ m_loadOffsets.clear();
+ m_numConsts = 0;
+ m_maxDistance = maxPoolSize;
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if (m_maxDistance < nextInsnSize + m_lastConstDelta + barrierSize + static_cast<int>(sizeof(uint32_t)))
+ flushConstantPool();
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if ((m_maxDistance < nextInsnSize + m_lastConstDelta + barrierSize + static_cast<int>(sizeof(uint32_t))) ||
+ (m_numConsts + nextConstSize / sizeof(uint32_t) >= maxPoolSize / sizeof(uint32_t)))
+ flushConstantPool();
+ }
+
+ uint32_t* m_pool;
+ char* m_mask;
+ LoadOffsets m_loadOffsets;
+
+ int m_numConsts;
+ int m_maxDistance;
+ int m_lastConstDelta;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBufferWithConstantPool_h
diff --git a/JavaScriptCore/assembler/CodeLocation.h b/JavaScriptCore/assembler/CodeLocation.h
new file mode 100644
index 0000000..b910b6f
--- /dev/null
+++ b/JavaScriptCore/assembler/CodeLocation.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeLocation_h
+#define CodeLocation_h
+
+#include <wtf/Platform.h>
+
+#include <MacroAssemblerCodeRef.h>
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name). These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or, put another way, to re-discover) another CodeLocation
+// at an offset from one you already know. When patching code to optimize it,
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart. To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
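+//
+// For example (a sketch; 'callSite' and the offset are hypothetical), a
+// single retained location can be used to re-derive its neighbours:
+//
+// CodeLocationCall call(callSite);
+// CodeLocationDataLabelPtr ptr = call.dataLabelPtrAtOffset(-sizeof(void*));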
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+ CodeLocationInstruction instructionAtOffset(int offset);
+ CodeLocationLabel labelAtOffset(int offset);
+ CodeLocationJump jumpAtOffset(int offset);
+ CodeLocationCall callAtOffset(int offset);
+ CodeLocationNearCall nearCallAtOffset(int offset);
+ CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+ CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+
+protected:
+ CodeLocationCommon()
+ {
+ }
+
+ CodeLocationCommon(MacroAssemblerCodePtr location)
+ : MacroAssemblerCodePtr(location)
+ {
+ }
+};
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+ CodeLocationInstruction() {}
+ explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationInstruction(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+ CodeLocationLabel() {}
+ explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationLabel(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+ CodeLocationJump() {}
+ explicit CodeLocationJump(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationJump(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+ CodeLocationCall() {}
+ explicit CodeLocationCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+ CodeLocationNearCall() {}
+ explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationNearCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+ CodeLocationDataLabel32() {}
+ explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabel32(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelPtr() {}
+ explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabelPtr(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // CodeLocation_h
diff --git a/JavaScriptCore/assembler/LinkBuffer.h b/JavaScriptCore/assembler/LinkBuffer.h
new file mode 100644
index 0000000..6d08117
--- /dev/null
+++ b/JavaScriptCore/assembler/LinkBuffer.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LinkBuffer_h
+#define LinkBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include <MacroAssembler.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed, and the code has been copied to its final location in memory. At this
+// time pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+// * Jump objects may be linked to external targets,
+// * The address of Jump objects may be taken, such that it can later be relinked.
+// * The return address of a Call may be acquired.
+// * The address of a Label pointing into the code may be resolved.
+// * The value referenced by a DataLabel may be set.
+//
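+// A typical (sketched) flow, assuming 'masm' has just finished generating
+// code and 'pool' is the ExecutablePool that backs the allocation:
+//
+// LinkBuffer patchBuffer(&masm, pool);
+// patchBuffer.link(callToHelper, FunctionPtr(helper));
+// CodeRef code = patchBuffer.finalizeCode();
+//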
+class LinkBuffer : public Noncopyable {
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssembler::Label Label;
+ typedef MacroAssembler::Jump Jump;
+ typedef MacroAssembler::JumpList JumpList;
+ typedef MacroAssembler::Call Call;
+ typedef MacroAssembler::DataLabel32 DataLabel32;
+ typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+
+public:
+ // Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
+ // First, executablePool is copied into m_executablePool, then the initialization of
+ // m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
+ LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
+ : m_executablePool(executablePool)
+ , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
+ , m_size(masm->m_assembler.size())
+#ifndef NDEBUG
+ , m_completed(false)
+#endif
+ {
+ }
+
+ ~LinkBuffer()
+ {
+ ASSERT(m_completed);
+ }
+
+ // These methods are used to link or set values at code generation time.
+
+ void link(Call call, FunctionPtr function)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ MacroAssembler::linkCall(code(), call, function);
+ }
+
+ void link(Jump jump, CodeLocationLabel label)
+ {
+ MacroAssembler::linkJump(code(), jump, label);
+ }
+
+ void link(JumpList list, CodeLocationLabel label)
+ {
+ for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+ MacroAssembler::linkJump(code(), list.m_jumps[i], label);
+ }
+
+ void patch(DataLabelPtr label, void* value)
+ {
+ MacroAssembler::linkPointer(code(), label.m_label, value);
+ }
+
+ void patch(DataLabelPtr label, CodeLocationLabel value)
+ {
+ MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress());
+ }
+
+ // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+
+ CodeLocationCall locationOf(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(!call.isFlagSet(Call::Near));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ }
+
+ CodeLocationNearCall locationOfNearCall(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(call.isFlagSet(Call::Near));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ }
+
+ CodeLocationLabel locationOf(Label label)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+ {
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ CodeLocationDataLabel32 locationOf(DataLabel32 label)
+ {
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ // This method obtains the return address of the call, given as an offset from
+ // the start of the code.
+ unsigned returnAddressOffset(Call call)
+ {
+ return MacroAssembler::getLinkerCallReturnOffset(call);
+ }
+
+ // Upon completion of all patching either 'finalizeCode()' or 'finalizeCodeAddendum()' should be called
+ // once to complete generation of the code. 'finalizeCode()' is suited to situations
+// where the executable pool must also be retained; the lighter-weight 'finalizeCodeAddendum()' is
+ // suited to adding to an existing allocation.
+ CodeRef finalizeCode()
+ {
+ performFinalization();
+
+ return CodeRef(m_code, m_executablePool, m_size);
+ }
+ CodeLocationLabel finalizeCodeAddendum()
+ {
+ performFinalization();
+
+ return CodeLocationLabel(code());
+ }
+
+private:
+ // Keep this private! - the underlying code should only be obtained externally via
+ // finalizeCode() or finalizeCodeAddendum().
+ void* code()
+ {
+ return m_code;
+ }
+
+ void performFinalization()
+ {
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ m_completed = true;
+#endif
+
+ ExecutableAllocator::makeExecutable(code(), m_size);
+ ExecutableAllocator::cacheFlush(code(), m_size);
+ }
+
+ RefPtr<ExecutablePool> m_executablePool;
+ void* m_code;
+ size_t m_size;
+#ifndef NDEBUG
+ bool m_completed;
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // LinkBuffer_h
diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h
index c9749a0..9e1c5d3 100644
--- a/JavaScriptCore/assembler/MacroAssembler.h
+++ b/JavaScriptCore/assembler/MacroAssembler.h
@@ -30,10 +30,14 @@
#if ENABLE(ASSEMBLER)
-#if PLATFORM(ARM_V7)
+#if PLATFORM_ARM_ARCH(7)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+#elif PLATFORM(ARM)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
#elif PLATFORM(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
diff --git a/JavaScriptCore/assembler/MacroAssemblerARM.h b/JavaScriptCore/assembler/MacroAssemblerARM.h
new file mode 100644
index 0000000..b04ed13
--- /dev/null
+++ b/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) 2008 Apple Inc.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARM_h
+#define MacroAssemblerARM_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#include "ARMAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+public:
+ enum Condition {
+ Equal = ARMAssembler::EQ,
+ NotEqual = ARMAssembler::NE,
+ Above = ARMAssembler::HI,
+ AboveOrEqual = ARMAssembler::CS,
+ Below = ARMAssembler::CC,
+ BelowOrEqual = ARMAssembler::LS,
+ GreaterThan = ARMAssembler::GT,
+ GreaterThanOrEqual = ARMAssembler::GE,
+ LessThan = ARMAssembler::LT,
+ LessThanOrEqual = ARMAssembler::LE,
+ Overflow = ARMAssembler::VS,
+ Signed = ARMAssembler::MI,
+ Zero = ARMAssembler::EQ,
+ NonZero = ARMAssembler::NE
+ };
+
+ enum DoubleCondition {
+ DoubleEqual = ARMAssembler::EQ,
+ DoubleGreaterThan = ARMAssembler::GT,
+ DoubleGreaterThanOrEqual = ARMAssembler::GE,
+ DoubleLessThan = ARMAssembler::LT,
+ DoubleLessThanOrEqual = ARMAssembler::LE,
+ };
+
+ static const RegisterID stackPointerRegister = ARM::sp;
+
+ static const Scale ScalePtr = TimesFour;
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.adds_r(dest, dest, src);
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ load32(address, ARM::S1);
+ add32(imm, ARM::S1);
+ store32(ARM::S1, address);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, ARM::S1);
+ add32(ARM::S1, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.ands_r(dest, dest, src);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
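+ // getImm() may return the bitwise inverse of the immediate when the
+ // inverse has a valid operand2 encoding and the value itself does not;
+ // OP2_INV_IMM marks that case, where BIC (and-not) replaces AND.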
+ ARMWord w = m_assembler.getImm(imm.m_value, ARM::S0, true);
+ if (w & ARMAssembler::OP2_INV_IMM)
+ m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
+ else
+ m_assembler.ands_r(dest, dest, w);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.lsl_r(dest, shift_amount));
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ if (src == dest) {
+ move(src, ARM::S0);
+ src = ARM::S0;
+ }
+ m_assembler.muls_r(dest, dest, src);
+ }
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, ARM::S0);
+ m_assembler.muls_r(dest, src, ARM::S0);
+ }
+
+ void not32(RegisterID dest)
+ {
+ m_assembler.mvns_r(dest, dest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orrs_r(dest, dest, src);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.asr_r(dest, shift_amount));
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subs_r(dest, dest, src);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ load32(address, ARM::S1);
+ sub32(imm, ARM::S1);
+ store32(ARM::S1, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, ARM::S1);
+ sub32(ARM::S1, dest);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.eors_r(dest, dest, src);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer32(true, dest, address.base, address.offset);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldr_un_imm(ARM::S0, 0);
+ m_assembler.dtr_ur(true, dest, address.base, ARM::S0);
+ return dataLabel;
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ load32(address, dest);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.add_r(ARM::S0, address.base, m_assembler.lsl(address.index, address.scale));
+ if (address.offset >= 0)
+ m_assembler.ldrh_u(dest, ARM::S0, ARMAssembler::getOp2Byte(address.offset));
+ else
+ m_assembler.ldrh_d(dest, ARM::S0, ARMAssembler::getOp2Byte(-address.offset));
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldr_un_imm(ARM::S0, 0);
+ m_assembler.dtr_ur(false, src, address.base, ARM::S0);
+ return dataLabel;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dataTransfer32(false, src, address.base, address.offset);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ if (imm.m_isPointer)
+ m_assembler.ldr_un_imm(ARM::S1, imm.m_value);
+ else
+ move(imm, ARM::S1);
+ store32(ARM::S1, address);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.dtr_u(false, src, ARM::S0, 0);
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address));
+ if (imm.m_isPointer)
+ m_assembler.ldr_un_imm(ARM::S1, imm.m_value);
+ else
+ m_assembler.moveImm(imm.m_value, ARM::S1);
+ m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0);
+ }
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop_r(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push_r(src);
+ }
+
+ void push(Address address)
+ {
+ load32(address, ARM::S1);
+ push(ARM::S1);
+ }
+
+ void push(Imm32 imm)
+ {
+ move(imm, ARM::S0);
+ push(ARM::S0);
+ }
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ if (imm.m_isPointer)
+ m_assembler.ldr_un_imm(dest, imm.m_value);
+ else
+ m_assembler.moveImm(imm.m_value, dest);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mov_r(dest, src);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ m_assembler.mov_r(ARM::S0, reg1);
+ m_assembler.mov_r(reg1, reg2);
+ m_assembler.mov_r(reg2, ARM::S0);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ Jump branch32(Condition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp_r(left, right);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Imm32 right)
+ {
+ if (right.m_isPointer) {
+ m_assembler.ldr_un_imm(ARM::S0, right.m_value);
+ m_assembler.cmp_r(left, ARM::S0);
+ } else
+ m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARM::S0));
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Address right)
+ {
+ load32(right, ARM::S1);
+ return branch32(cond, left, ARM::S1);
+ }
+
+ Jump branch32(Condition cond, Address left, RegisterID right)
+ {
+ load32(left, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Jump branch32(Condition cond, Address left, Imm32 right)
+ {
+ load32(left, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load32(left, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ {
+ UNUSED_PARAM(cond);
+ UNUSED_PARAM(left);
+ UNUSED_PARAM(right);
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load16(left, ARM::S0);
+ move(right, ARM::S1);
+ m_assembler.cmp_r(ARM::S0, ARM::S1);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.tst_r(reg, mask);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ ARMWord w = m_assembler.getImm(mask.m_value, ARM::S0, true);
+ if (w & ARMAssembler::OP2_INV_IMM)
+ m_assembler.bics_r(ARM::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
+ else
+ m_assembler.tst_r(reg, w);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ load32(address, ARM::S1);
+ return branchTest32(cond, ARM::S1, mask);
+ }
+
+ Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ load32(address, ARM::S1);
+ return branchTest32(cond, ARM::S1, mask);
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ move(target, ARM::pc);
+ }
+
+ void jump(Address address)
+ {
+ load32(address, ARM::pc);
+ }
+
+ Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest) {
+ move(src1, ARM::S0);
+ src1 = ARM::S0;
+ }
+ m_assembler.mull_r(ARM::S1, dest, src2, src1);
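+ // mull_r leaves the high word of the 64-bit product in S1. The product
+ // fits in 32 bits iff that high word equals the sign extension of the
+ // low word, so the comparison below sets NE exactly on overflow.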
+ m_assembler.cmp_r(ARM::S1, m_assembler.asr(dest, 31));
+ }
+
+ Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ mull32(src, dest, dest);
+ cond = NonZero;
+ } else
+ mul32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ move(imm, ARM::S0);
+ mull32(ARM::S0, src, dest);
+ cond = NonZero;
+ } else
+ mul32(imm, src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ void breakpoint()
+ {
+ m_assembler.bkpt(0);
+ }
+
+ Call nearCall()
+ {
+ prepareCall();
+ return Call(m_assembler.jmp(), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ prepareCall();
+ move(ARM::pc, target);
+ JmpSrc jmpSrc;
+ return Call(jmpSrc, Call::None);
+ }
+
+ void call(Address address)
+ {
+ call32(address.base, address.offset);
+ }
+
+ void ret()
+ {
+ pop(ARM::pc);
+ }
+
+ void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp_r(left, right);
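+ // Materialize the boolean: clear dest unconditionally, then
+ // conditionally overwrite it with 1 when 'cond' holds.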
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARM::S0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ load32(address, ARM::S1);
+ if (mask.m_value == -1)
+ m_assembler.cmp_r(0, ARM::S1);
+ else
+ m_assembler.tst_r(ARM::S1, m_assembler.getImm(mask.m_value, ARM::S0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.ldr_un_imm(ARM::S1, reinterpret_cast<ARMWord>(address.m_ptr));
+ m_assembler.dtr_u(true, ARM::S1, ARM::S1, 0);
+ add32(imm, ARM::S1);
+ m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address.m_ptr));
+ m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.ldr_un_imm(ARM::S1, reinterpret_cast<ARMWord>(address.m_ptr));
+ m_assembler.dtr_u(true, ARM::S1, ARM::S1, 0);
+ sub32(imm, ARM::S1);
+ m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address.m_ptr));
+ m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0);
+ }
+
+ void load32(void* address, RegisterID dest)
+ {
+ m_assembler.ldr_un_imm(ARM::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.dtr_u(true, dest, ARM::S0, 0);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+ {
+ load32(left.m_ptr, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Call call()
+ {
+ prepareCall();
+ return Call(m_assembler.jmp(), Call::Linkable);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+ DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+ {
+ DataLabelPtr dataLabel(this);
+ m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
+ return dataLabel;
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, ARM::S1);
+ Jump jump = branch32(cond, left, ARM::S1);
+ jump.enableLatePatch();
+ return jump;
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ load32(left, ARM::S1);
+ dataLabel = moveWithPatch(initialRightValue, ARM::S0);
+ Jump jump = branch32(cond, ARM::S0, ARM::S1);
+ jump.enableLatePatch();
+ return jump;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr dataLabel = moveWithPatch(initialValue, ARM::S1);
+ store32(ARM::S1, address);
+ return dataLabel;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(ImmPtr(0), address);
+ }
+
+ // Floating point operators
+ bool supportsFloatingPoint() const
+ {
+ // FIXME: should be a dynamic test: VFP, FPA, or nothing
+ return false;
+ }
+
+ bool supportsFloatingPointTruncate() const
+ {
+ return false;
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ m_assembler.doubleTransfer(true, dest, address.base, address.offset);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ m_assembler.doubleTransfer(false, src, address.base, address.offset);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.faddd_r(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARM::SD0);
+ addDouble(ARM::SD0, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fsubd_r(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARM::SD0);
+ subDouble(ARM::SD0, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fmuld_r(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARM::SD0);
+ mulDouble(ARM::SD0, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fmsr_r(dest, src);
+ m_assembler.fsitod_r(dest, dest);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.fcmpd_r(left, right);
+ m_assembler.fmstat();
+ return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond)));
+ }
+
+ // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+
+protected:
+ ARMAssembler::Condition ARMCondition(Condition cond)
+ {
+ return static_cast<ARMAssembler::Condition>(cond);
+ }
+
+ void prepareCall()
+ {
+ m_assembler.ensureSpace(3 * sizeof(ARMWord), sizeof(ARMWord));
+
+ // S0 might be used for parameter passing
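+ // Reading pc yields the address of the current instruction plus 8, so
+ // pc + 4 is the address of the instruction following the branch that the
+ // caller emits right after prepareCall() - i.e. the return address pushed here.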
+ m_assembler.add_r(ARM::S1, ARM::pc, ARMAssembler::OP2_IMM | 0x4);
+ m_assembler.push_r(ARM::S1);
+ }
+
+ void call32(RegisterID base, int32_t offset)
+ {
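+ // ARM load/store displacements are limited to 12 bits: offsets up to
+ // 0xfff are encoded directly; up to 0xfffff the top 8 bits go into an
+ // add/sub and the low 12 bits into the transfer; larger offsets are
+ // materialized in a register via getImm().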
+ if (base == ARM::sp)
+ offset += 4;
+
+ if (offset >= 0) {
+ if (offset <= 0xfff) {
+ prepareCall();
+ m_assembler.dtr_u(true, ARM::pc, base, offset);
+ } else if (offset <= 0xfffff) {
+ m_assembler.add_r(ARM::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+ prepareCall();
+ m_assembler.dtr_u(true, ARM::pc, ARM::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = m_assembler.getImm(offset, ARM::S0);
+ prepareCall();
+ m_assembler.dtr_ur(true, ARM::pc, base, reg);
+ }
+ } else {
+ offset = -offset;
+ if (offset <= 0xfff) {
+ prepareCall();
+ m_assembler.dtr_d(true, ARM::pc, base, offset);
+ } else if (offset <= 0xfffff) {
+ m_assembler.sub_r(ARM::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+ prepareCall();
+ m_assembler.dtr_d(true, ARM::pc, ARM::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = m_assembler.getImm(offset, ARM::S0);
+ prepareCall();
+ m_assembler.dtr_dr(true, ARM::pc, base, reg);
+ }
+ }
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMAssembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#endif // MacroAssemblerARM_h
diff --git a/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index 5ccbd43..f7a8402 100644
--- a/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -1054,6 +1054,25 @@ protected:
{
return static_cast<ARMv7Assembler::Condition>(cond);
}
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 0aa985c..341a7ff 100644
--- a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -37,7 +37,7 @@
// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
-#if PLATFORM(ARM_V7)
+#if PLATFORM_ARM_ARCH(7)
// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
// into the processor are decorated with the bottom bit set, indicating that this is
// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
@@ -46,9 +46,12 @@
#define ASSERT_VALID_CODE_POINTER(ptr) \
ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
+#define ASSERT_VALID_CODE_OFFSET(offset) \
+ ASSERT(!(offset & 1)) // Must be multiple of 2.
#else
#define ASSERT_VALID_CODE_POINTER(ptr) \
ASSERT(ptr)
+#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
#endif
namespace JSC {
@@ -98,6 +101,12 @@ public:
ASSERT_VALID_CODE_POINTER(m_value);
}
+ explicit ReturnAddressPtr(FunctionPtr function)
+ : m_value(function.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
void* value() const { return m_value; }
private:
@@ -115,7 +124,7 @@ public:
}
explicit MacroAssemblerCodePtr(void* value)
-#if PLATFORM(ARM_V7)
+#if PLATFORM_ARM_ARCH(7)
// Decorate the pointer as a thumb code pointer.
: m_value(reinterpret_cast<char*>(value) + 1)
#else
@@ -132,13 +141,18 @@ public:
}
void* executableAddress() const { return m_value; }
-#if PLATFORM(ARM_V7)
+#if PLATFORM_ARM_ARCH(7)
// To use this pointer as a data address remove the decoration.
void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
#else
void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
#endif
+ bool operator!()
+ {
+ return !m_value;
+ }
+
private:
void* m_value;
};
@@ -151,28 +165,20 @@ private:
class MacroAssemblerCodeRef {
public:
MacroAssemblerCodeRef()
-#ifndef NDEBUG
: m_size(0)
-#endif
{
}
MacroAssemblerCodeRef(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
: m_code(code)
, m_executablePool(executablePool)
+ , m_size(size)
{
-#ifndef NDEBUG
- m_size = size;
-#else
- UNUSED_PARAM(size);
-#endif
}
MacroAssemblerCodePtr m_code;
RefPtr<ExecutablePool> m_executablePool;
-#ifndef NDEBUG
size_t m_size;
-#endif
};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86.h b/JavaScriptCore/assembler/MacroAssemblerX86.h
index aaf98fd..6e96240 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -51,6 +51,8 @@ public:
using MacroAssemblerX86Common::store32;
using MacroAssemblerX86Common::branch32;
using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
void add32(Imm32 imm, RegisterID src, RegisterID dest)
{
@@ -87,6 +89,17 @@ public:
m_assembler.movl_mr(address, dest);
}
+ void loadDouble(void* address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address, dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
+ }
+
void store32(Imm32 imm, void* address)
{
m_assembler.movl_i32m(imm.m_value, address);
@@ -164,6 +177,24 @@ public:
private:
const bool m_isSSE2Present;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ X86Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index cea691e..c9e3569 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -57,6 +57,7 @@ public:
enum DoubleCondition {
DoubleEqual = X86Assembler::ConditionE,
+ DoubleNotEqual = X86Assembler::ConditionNE,
DoubleGreaterThan = X86Assembler::ConditionA,
DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
DoubleLessThan = X86Assembler::ConditionB,
@@ -91,6 +92,11 @@ public:
{
m_assembler.addl_mr(src.offset, src.base, dest);
}
+
+ void add32(RegisterID src, Address dest)
+ {
+ m_assembler.addl_rm(src, dest.offset, dest.base);
+ }
void and32(RegisterID src, RegisterID dest)
{
@@ -102,6 +108,16 @@ public:
m_assembler.andl_ir(imm.m_value, dest);
}
+ void and32(RegisterID src, Address dest)
+ {
+ m_assembler.andl_rm(src, dest.offset, dest.base);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ m_assembler.andl_mr(src.offset, src.base, dest);
+ }
+
void and32(Imm32 imm, Address address)
{
m_assembler.andl_im(imm.m_value, address.offset, address.base);
@@ -138,16 +154,36 @@ public:
{
m_assembler.imull_rr(src, dest);
}
+
+ void mul32(Address src, RegisterID dest)
+ {
+ m_assembler.imull_mr(src.offset, src.base, dest);
+ }
void mul32(Imm32 imm, RegisterID src, RegisterID dest)
{
m_assembler.imull_i32r(src, imm.m_value, dest);
}
-
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.negl_r(srcDest);
+ }
+
+ void neg32(Address srcDest)
+ {
+ m_assembler.negl_m(srcDest.offset, srcDest.base);
+ }
+
void not32(RegisterID srcDest)
{
m_assembler.notl_r(srcDest);
}
+
+ void not32(Address srcDest)
+ {
+ m_assembler.notl_m(srcDest.offset, srcDest.base);
+ }
void or32(RegisterID src, RegisterID dest)
{
@@ -159,6 +195,16 @@ public:
m_assembler.orl_ir(imm.m_value, dest);
}
+ void or32(RegisterID src, Address dest)
+ {
+ m_assembler.orl_rm(src, dest.offset, dest.base);
+ }
+
+ void or32(Address src, RegisterID dest)
+ {
+ m_assembler.orl_mr(src.offset, src.base, dest);
+ }
+
void or32(Imm32 imm, Address address)
{
m_assembler.orl_im(imm.m_value, address.offset, address.base);
@@ -211,14 +257,35 @@ public:
m_assembler.subl_mr(src.offset, src.base, dest);
}
+ void sub32(RegisterID src, Address dest)
+ {
+ m_assembler.subl_rm(src, dest.offset, dest.base);
+ }
+
void xor32(RegisterID src, RegisterID dest)
{
m_assembler.xorl_rr(src, dest);
}
- void xor32(Imm32 imm, RegisterID srcDest)
+ void xor32(Imm32 imm, Address dest)
{
- m_assembler.xorl_ir(imm.m_value, srcDest);
+ m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.xorl_ir(imm.m_value, dest);
+ }
+
+ void xor32(RegisterID src, Address dest)
+ {
+ m_assembler.xorl_rm(src, dest.offset, dest.base);
+ }
+
+ void xor32(Address src, RegisterID dest)
+ {
+ m_assembler.xorl_mr(src.offset, src.base, dest);
}
@@ -300,6 +367,18 @@ public:
m_assembler.addsd_mr(src.offset, src.base, dest);
}
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_rr(src, dest);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_mr(src.offset, src.base, dest);
+ }
+
void subDouble(FPRegisterID src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
@@ -330,6 +409,11 @@ public:
m_assembler.cvtsi2sd_rr(src, dest);
}
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
+ }
+
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
ASSERT(isSSE2Present());
@@ -337,6 +421,12 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, Address right)
+ {
+ m_assembler.ucomisd_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
// Truncates 'src' to an integer, and places the result in 'dest'.
// If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits
@@ -348,6 +438,12 @@ public:
return branch32(Equal, dest, Imm32(0x80000000));
}
+ void zeroDouble(FPRegisterID srcDest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(srcDest, srcDest);
+ }
+
// Stack manipulation operations:
//
@@ -397,7 +493,8 @@ public:
{
// Note: on 64-bit this is a full register move; perhaps it would be
// useful to have separate move32 & movePtr, with move32 zero extending?
- m_assembler.movq_rr(src, dest);
+ if (src != dest)
+ m_assembler.movq_rr(src, dest);
}
void move(ImmPtr imm, RegisterID dest)
@@ -605,12 +702,40 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+ Jump branchAdd32(Condition cond, Imm32 src, Address dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, RegisterID src, Address dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, Address src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT(cond == Overflow);
mul32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+
+ Jump branchMul32(Condition cond, Address src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ mul32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
@@ -632,7 +757,35 @@ public:
sub32(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
-
+
+ Jump branchSub32(Condition cond, Imm32 imm, Address dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, Address dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, Address src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+ or32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
// Miscellaneous operations:
@@ -661,6 +814,27 @@ public:
m_assembler.ret();
}
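+ // The set8 variants write only the low byte of 'dest' (setCC is an 8-bit
+ // store) and do not zero-extend; callers must rely on the bottom 8 bits only.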
+ void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpl_rr(right, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ }
+
+ void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpl_mr(left.offset, left.base, right);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ }
+
+ void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ }
+
void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
{
m_assembler.cmpl_rr(right, left);
@@ -682,6 +856,16 @@ public:
    // The mask should be optional... perhaps the argument order should be
// dest-src, operations always have a dest? ... possibly not true, considering
// asm ops like test, or pseudo ops like pop().
+
+ void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
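+ // An all-ones mask tests every bit, which is equivalent to comparing the
+ // word against zero and permits a shorter immediate encoding.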
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ }
+
void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
{
if (mask.m_value == -1)
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index ffdca7c..e3d296c 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -32,6 +32,8 @@
#include "MacroAssemblerX86Common.h"
+#define REPATCH_OFFSET_CALL_R11 3
+
namespace JSC {
class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
@@ -48,6 +50,8 @@ public:
using MacroAssemblerX86Common::load32;
using MacroAssemblerX86Common::store32;
using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
void add32(Imm32 imm, AbsoluteAddress address)
{
@@ -84,6 +88,18 @@ public:
}
}
+ void loadDouble(void* address, FPRegisterID dest)
+ {
+ move(ImmPtr(address), scratchRegister);
+ loadDouble(scratchRegister, dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
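+ // Note: the value at src.m_ptr is read here, at code-generation time, and
+ // embedded as an immediate; the generated code does not reload it at run time.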
+ move(Imm32(*static_cast<int32_t*>(src.m_ptr)), scratchRegister);
+ m_assembler.cvtsi2sd_rr(scratchRegister, dest);
+ }
+
void store32(Imm32 imm, void* address)
{
move(X86::eax, scratchRegister);
@@ -446,6 +462,29 @@ public:
bool supportsFloatingPoint() const { return true; }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
bool supportsFloatingPointTruncate() const { return true; }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
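+ // Near calls are direct rel32 calls and can be linked in place. All other
+ // calls on X86_64 go through a pointer moved into r11, so we instead patch
+ // the 64-bit target immediate that precedes the 'call *%r11' instruction.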
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ if (!call.isFlagSet(Call::Near))
+ X86Assembler::linkPointer(code, X86Assembler::labelFor(call.m_jmp, -REPATCH_OFFSET_CALL_R11), function.value());
+ else
+ X86Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/RepatchBuffer.h b/JavaScriptCore/assembler/RepatchBuffer.h
new file mode 100644
index 0000000..89cbf06
--- /dev/null
+++ b/JavaScriptCore/assembler/RepatchBuffer.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RepatchBuffer_h
+#define RepatchBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include <MacroAssembler.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// RepatchBuffer:
+//
+// This class is used to modify code after code generation has been completed,
+// and after the code has potentially already been executed. This mechanism is
+// used to apply optimizations to the code.
+//
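+// A typical use, sketched; 'codeBlock', 'callSite' and 'newTarget' are
+// hypothetical values recorded during code generation:
+//
+//     RepatchBuffer repatchBuffer(codeBlock);  // makes the code writable
+//     repatchBuffer.relink(callSite, FunctionPtr(newTarget));
+//     // ~RepatchBuffer() restores execute permission on the region.
+//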
+class RepatchBuffer {
+ typedef MacroAssemblerCodePtr CodePtr;
+
+public:
+ RepatchBuffer(CodeBlock* codeBlock)
+ {
+ JITCode& code = codeBlock->getJITCode();
+ m_start = code.start();
+ m_size = code.size();
+
+ ExecutableAllocator::makeWritable(m_start, m_size);
+ }
+
+ ~RepatchBuffer()
+ {
+ ExecutableAllocator::makeExecutable(m_start, m_size);
+ }
+
+ void relink(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchJump(jump, destination);
+ }
+
+ void relink(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationCall call, FunctionPtr destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodePtr destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination));
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, destination);
+ }
+
+ void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ MacroAssembler::repatchInt32(dataLabel32, value);
+ }
+
+ void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ MacroAssembler::repatchPointer(dataLabelPtr, value);
+ }
+
+ void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+ {
+ MacroAssembler::repatchLoadPtrToLEA(instruction);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), function);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+private:
+ void* m_start;
+ size_t m_size;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // RepatchBuffer_h
diff --git a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h
index 7a8b58d..fb58361 100644
--- a/JavaScriptCore/assembler/X86Assembler.h
+++ b/JavaScriptCore/assembler/X86Assembler.h
@@ -114,10 +114,12 @@ private:
OP_OR_GvEv = 0x0B,
OP_2BYTE_ESCAPE = 0x0F,
OP_AND_EvGv = 0x21,
+ OP_AND_GvEv = 0x23,
OP_SUB_EvGv = 0x29,
OP_SUB_GvEv = 0x2B,
PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
OP_XOR_EvGv = 0x31,
+ OP_XOR_GvEv = 0x33,
OP_CMP_EvGv = 0x39,
OP_CMP_GvEv = 0x3B,
#if PLATFORM(X86_64)
@@ -169,6 +171,8 @@ private:
OP2_ADDSD_VsdWsd = 0x58,
OP2_MULSD_VsdWsd = 0x59,
OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_XORPD_VpdWpd = 0x57,
OP2_MOVD_VdEd = 0x6E,
OP2_MOVD_EdVd = 0x7E,
OP2_JCC_rel32 = 0x80,
@@ -205,6 +209,7 @@ private:
GROUP3_OP_TEST = 0,
GROUP3_OP_NOT = 2,
+ GROUP3_OP_NEG = 3,
GROUP3_OP_IDIV = 7,
GROUP5_OP_CALLN = 2,
@@ -226,6 +231,7 @@ public:
{
}
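+ // No-op on X86; provided so that ports whose assemblers must defer
+ // patching, such as those using a constant pool, can share one interface.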
+ void enableLatePatch() { }
private:
JmpSrc(int offset)
: m_offset(offset)
@@ -318,6 +324,11 @@ public:
m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
}
+ void addl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
+ }
+
void addl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -385,6 +396,16 @@ public:
m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
}
+ void andl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
+ }
+
+ void andl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
+ }
+
void andl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -436,11 +457,26 @@ public:
}
#endif
+ void negl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+
+ void negl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
+ }
+
void notl_r(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
}
+ void notl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+ }
+
void orl_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
@@ -451,6 +487,11 @@ public:
m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
}
+ void orl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
+ }
+
void orl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -512,6 +553,11 @@ public:
m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
}
+ void subl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
+ }
+
void subl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -568,6 +614,27 @@ public:
m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
}
+ void xorl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
+ }
+
+ void xorl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void xorl_im(int imm, int offset, RegisterID base)
+ {
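+ // Use the sign-extended 8-bit immediate form when the value fits; it is
+ // three bytes shorter than the 32-bit form.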
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
void xorl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -648,7 +715,12 @@ public:
{
m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
}
-
+
+ void imull_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
+ }
+
void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
{
m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
@@ -1153,6 +1225,11 @@ public:
return m_formatter.immediateRel32();
}
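+ // jz is another mnemonic for je; both branch when ZF is set.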
+ JmpSrc jz()
+ {
+ return je();
+ }
+
JmpSrc jl()
{
m_formatter.twoByteOp(jccRel32(ConditionL));
@@ -1245,6 +1322,20 @@ public:
m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
}
+ void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+ }
+
+#if !PLATFORM(X86_64)
+ void cvtsi2sd_mr(void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
+ }
+#endif
+
void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
@@ -1283,6 +1374,14 @@ public:
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
}
+#if !PLATFORM(X86_64)
+ void movsd_mr(void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
+ }
+#endif
+
void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
@@ -1320,6 +1419,30 @@ public:
m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
}
+ void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
// Misc instructions:
void int3()
@@ -1344,6 +1467,11 @@ public:
return JmpDst(m_formatter.size());
}
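+ // Builds a JmpDst at a fixed offset from an existing jump or call source,
+ // letting the linker address data embedded near that instruction (e.g. the
+ // pointer preceding a 'call *%r11' on X86_64).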
+ static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
+ {
+ return JmpDst(jump.m_offset + offset);
+ }
+
JmpDst align(int alignment)
{
while (!m_formatter.isAligned(alignment))
@@ -1366,59 +1494,48 @@ public:
ASSERT(to.m_offset != -1);
char* code = reinterpret_cast<char*>(m_formatter.data());
- patchRel32(code + from.m_offset, code + to.m_offset);
+ setRel32(code + from.m_offset, code + to.m_offset);
}
static void linkJump(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
- patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
static void linkCall(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
- patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
- }
-
-#if PLATFORM(X86_64)
- static void patchPointerForCall(void* where, void* value)
- {
- reinterpret_cast<void**>(where)[-1] = value;
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
-#endif
- static void patchPointer(void* code, JmpDst where, void* value)
+ static void linkPointer(void* code, JmpDst where, void* value)
{
ASSERT(where.m_offset != -1);
- patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
}
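+ // These helpers no longer make the code writable themselves; the caller
+ // (e.g. RepatchBuffer) is now responsible for toggling the region between
+ // writable and executable.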
static void relinkJump(void* from, void* to)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
- patchRel32(from, to);
+ setRel32(from, to);
}
static void relinkCall(void* from, void* to)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
- patchRel32(from, to);
+ setRel32(from, to);
}
static void repatchInt32(void* where, int32_t value)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(int32_t), sizeof(int32_t));
- patchInt32(where, value);
+ setInt32(where, value);
}
static void repatchPointer(void* where, void* value)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(void*), sizeof(void*));
- patchPointer(where, value);
+ setPointer(where, value);
}
static void repatchLoadPtrToLEA(void* where)
@@ -1428,7 +1545,6 @@ public:
// Skip over the prefix byte.
where = reinterpret_cast<char*>(where) + 1;
#endif
- ExecutableAllocator::MakeWritable unprotect(where, 1);
*reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
}
@@ -1476,22 +1592,22 @@ public:
private:
- static void patchPointer(void* where, void* value)
+ static void setPointer(void* where, void* value)
{
reinterpret_cast<void**>(where)[-1] = value;
}
- static void patchInt32(void* where, int32_t value)
+ static void setInt32(void* where, int32_t value)
{
reinterpret_cast<int32_t*>(where)[-1] = value;
}
- static void patchRel32(void* from, void* to)
+ static void setRel32(void* from, void* to)
{
intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
ASSERT(offset == static_cast<int32_t>(offset));
- patchInt32(from, offset);
+ setInt32(from, offset);
}
class X86InstructionFormatter {
@@ -1611,6 +1727,16 @@ private:
memoryModRM(reg, base, index, scale, offset);
}
+#if !PLATFORM(X86_64)
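+ // Two-byte opcode with an absolute-address memory operand; 32-bit only,
+ // since on X86_64 a bare disp32 is interpreted as RIP-relative, and
+ // absolute addresses go through a scratch register instead.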
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
#if PLATFORM(X86_64)
// Quad-word-sized operands:
//