Diffstat (limited to 'JavaScriptCore/assembler')
-rw-r--r--  JavaScriptCore/assembler/ARMv7Assembler.h          1758
-rw-r--r--  JavaScriptCore/assembler/AbstractMacroAssembler.h    369
-rw-r--r--  JavaScriptCore/assembler/AssemblerBuffer.h             2
-rw-r--r--  JavaScriptCore/assembler/MacroAssembler.h             11
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerARMv7.h       1063
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerCodeRef.h      182
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86.h           59
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86Common.h    291
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86_64.h        85
-rw-r--r--  JavaScriptCore/assembler/X86Assembler.h               240
10 files changed, 3842 insertions, 218 deletions
diff --git a/JavaScriptCore/assembler/ARMv7Assembler.h b/JavaScriptCore/assembler/ARMv7Assembler.h
new file mode 100644
index 0000000..c9cb87e
--- /dev/null
+++ b/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -0,0 +1,1758 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM_V7)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+namespace JSC {
+
+namespace ARM {
+ typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7, wr = r7, // thumb work register
+ r8,
+ r9, sb = r9, // static base
+ r10, sl = r10, // stack limit
+ r11, fp = r11, // frame pointer
+ r12, ip = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15,
+ } RegisterID;
+
+ // s0 == d0 == q0
+ // s4 == d2 == q1
+ // etc
+ typedef enum {
+ s0 = 0,
+ s1 = 1,
+ s2 = 2,
+ s3 = 3,
+ s4 = 4,
+ s5 = 5,
+ s6 = 6,
+ s7 = 7,
+ s8 = 8,
+ s9 = 9,
+ s10 = 10,
+ s11 = 11,
+ s12 = 12,
+ s13 = 13,
+ s14 = 14,
+ s15 = 15,
+ s16 = 16,
+ s17 = 17,
+ s18 = 18,
+ s19 = 19,
+ s20 = 20,
+ s21 = 21,
+ s22 = 22,
+ s23 = 23,
+ s24 = 24,
+ s25 = 25,
+ s26 = 26,
+ s27 = 27,
+ s28 = 28,
+ s29 = 29,
+ s30 = 30,
+ s31 = 31,
+ d0 = 0 << 1,
+ d1 = 1 << 1,
+ d2 = 2 << 1,
+ d3 = 3 << 1,
+ d4 = 4 << 1,
+ d5 = 5 << 1,
+ d6 = 6 << 1,
+ d7 = 7 << 1,
+ d8 = 8 << 1,
+ d9 = 9 << 1,
+ d10 = 10 << 1,
+ d11 = 11 << 1,
+ d12 = 12 << 1,
+ d13 = 13 << 1,
+ d14 = 14 << 1,
+ d15 = 15 << 1,
+ d16 = 16 << 1,
+ d17 = 17 << 1,
+ d18 = 18 << 1,
+ d19 = 19 << 1,
+ d20 = 20 << 1,
+ d21 = 21 << 1,
+ d22 = 22 << 1,
+ d23 = 23 << 1,
+ d24 = 24 << 1,
+ d25 = 25 << 1,
+ d26 = 26 << 1,
+ d27 = 27 << 1,
+ d28 = 28 << 1,
+ d29 = 29 << 1,
+ d30 = 30 << 1,
+ d31 = 31 << 1,
+ q0 = 0 << 2,
+ q1 = 1 << 2,
+ q2 = 2 << 2,
+ q3 = 3 << 2,
+ q4 = 4 << 2,
+ q5 = 5 << 2,
+ q6 = 6 << 2,
+ q7 = 7 << 2,
+ q8 = 8 << 2,
+ q9 = 9 << 2,
+ q10 = 10 << 2,
+ q11 = 11 << 2,
+ q12 = 12 << 2,
+ q13 = 13 << 2,
+ q14 = 14 << 2,
+ q15 = 15 << 2,
+ q16 = 16 << 2,
+ q17 = 17 << 2,
+ q18 = 18 << 2,
+ q19 = 19 << 2,
+ q20 = 20 << 2,
+ q21 = 21 << 2,
+ q22 = 22 << 2,
+ q23 = 23 << 2,
+ q24 = 24 << 2,
+ q25 = 25 << 2,
+ q26 = 26 << 2,
+ q27 = 27 << 2,
+ q28 = 28 << 2,
+ q29 = 29 << 2,
+ q30 = 30 << 2,
+ q31 = 31 << 2,
+ } FPRegisterID;
+}
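+
+// Illustrative note (editorial, not part of the original patch): since the three
+// FP enums share one numbering space, overlapping registers compare equal as plain
+// integers, e.g. ARM::s4 == 4, ARM::d2 == (2 << 1) == 4 and ARM::q1 == (1 << 2) == 4;
+// ARMv7Assembler's singleRegisterNum()/doubleRegisterNum()/quadRegisterNum() helpers
+// shift the enum value back down to recover the architectural register number.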
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+ friend class ARMv7Assembler;
+
+ typedef uint8_t ThumbImmediateType;
+ static const ThumbImmediateType TypeInvalid = 0;
+ static const ThumbImmediateType TypeEncoded = 1;
+ static const ThumbImmediateType TypeUInt16 = 2;
+
+ typedef union {
+ int16_t asInt;
+ struct {
+ unsigned imm8 : 8;
+ unsigned imm3 : 3;
+ unsigned i : 1;
+ unsigned imm4 : 4;
+ };
+ // If this is an encoded immediate, then it may describe a shift, or a pattern.
+ struct {
+ unsigned shiftValue7 : 7;
+ unsigned shiftAmount : 5;
+ };
+ struct {
+ unsigned immediate : 8;
+ unsigned pattern : 4;
+ };
+ } ThumbImmediateValue;
+
+    // byte0 contains the least significant byte; not using an array to make client code endian agnostic.
+ typedef union {
+ int32_t asInt;
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ };
+ } PatternBytes;
+
+    ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+    {
+        if (value & ~((1 << N) - 1)) /* if any of the top N bits (of 2N bits) are set */
+            value >>= N;             /* lose the bottom N */
+        else                         /* if none of the top N bits are set, */
+            zeros += N;              /* then we have identified N leading zeros */
+    }
+
+ static int32_t countLeadingZeros(uint32_t value)
+ {
+ if (!value)
+ return 32;
+
+ int32_t zeros = 0;
+ countLeadingZerosPartial(value, zeros, 16);
+ countLeadingZerosPartial(value, zeros, 8);
+ countLeadingZerosPartial(value, zeros, 4);
+ countLeadingZerosPartial(value, zeros, 2);
+ countLeadingZerosPartial(value, zeros, 1);
+ return zeros;
+ }
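+
+    // Illustrative trace (editorial, not part of the original patch): for
+    // countLeadingZeros(0x2a000) the 16/8/4/2/1 halving steps accumulate
+    // zeros == 14, matching the highest set bit (bit 17) of the input.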
+
+ ARMThumbImmediate()
+ : m_type(TypeInvalid)
+ {
+ m_value.asInt = 0;
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+ : m_type(type)
+ , m_value(value)
+ {
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+ : m_type(TypeUInt16)
+ {
+ m_value.asInt = value;
+ }
+
+public:
+ static ARMThumbImmediate makeEncodedImm(uint32_t value)
+ {
+ ThumbImmediateValue encoding;
+ encoding.asInt = 0;
+
+ // okay, these are easy.
+ if (value < 256) {
+ encoding.immediate = value;
+ encoding.pattern = 0;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ int32_t leadingZeros = countLeadingZeros(value);
+ // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+ ASSERT(leadingZeros < 24);
+
+ // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+ // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+ // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
+ int32_t rightShiftAmount = 24 - leadingZeros;
+ if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+ // Shift the value down to the low byte position. The assign to
+ // shiftValue7 drops the implicit top bit.
+ encoding.shiftValue7 = value >> rightShiftAmount;
+            // The encoded shift amount is the magnitude of a right rotate.
+ encoding.shiftAmount = 8 + leadingZeros;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ PatternBytes bytes;
+ bytes.asInt = value;
+
+ if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 3;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 1;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+            encoding.immediate = bytes.byte1;
+ encoding.pattern = 2;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ return ARMThumbImmediate();
+ }
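+
+    // Worked examples (editorial, illustrative only):
+    //   makeEncodedImm(0x2a000): 14 leading zeros, so the value is the byte 0xa8
+    //   shifted left by 10; this is stored as shiftValue7 = 0x28 (the top bit of
+    //   the byte is implicit) with shiftAmount = 8 + 14 = 22.
+    //   makeEncodedImm(0x00ab00ab), makeEncodedImm(0xab00ab00) and
+    //   makeEncodedImm(0xabababab) use patterns 1, 2 and 3 respectively, each
+    //   with an immediate byte of 0xab.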
+
+ static ARMThumbImmediate makeUInt12(int32_t value)
+ {
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+ {
+        // If this is not a 12-bit unsigned int, try making an encoded immediate.
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : makeEncodedImm(value);
+ }
+
+ // The 'make' methods, above, return a !isValid() value if the argument
+    // cannot be represented as the requested type. This method is called
+ // 'get' since the argument can always be represented.
+ static ARMThumbImmediate makeUInt16(uint16_t value)
+ {
+ return ARMThumbImmediate(TypeUInt16, value);
+ }
+
+ bool isValid()
+ {
+ return m_type != TypeInvalid;
+ }
+
+ // These methods rely on the format of encoded byte values.
+ bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+ bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+ bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+ bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+ bool isUInt7() { return !(m_value.asInt & 0xff80); }
+ bool isUInt8() { return !(m_value.asInt & 0xff00); }
+ bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+ bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+ bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+ bool isUInt16() { return m_type == TypeUInt16; }
+ uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+ uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+ uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+ uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+ uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+ uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+ uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+ uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+ uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+ uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+ bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+ ThumbImmediateType m_type;
+ ThumbImmediateValue m_value;
+};
+
+
+typedef enum {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+
+ SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ARMv7Assembler;
+class ShiftTypeAndAmount {
+ friend class ARMv7Assembler;
+
+public:
+ ShiftTypeAndAmount()
+ {
+ m_u.type = (ARMShiftType)0;
+ m_u.amount = 0;
+ }
+
+ ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+ {
+ m_u.type = type;
+ m_u.amount = amount & 31;
+ }
+
+ unsigned lo4() { return m_u.lo4; }
+ unsigned hi4() { return m_u.hi4; }
+
+private:
+ union {
+ struct {
+ unsigned lo4 : 4;
+ unsigned hi4 : 4;
+ };
+ struct {
+ unsigned type : 2;
+ unsigned amount : 5;
+ };
+ } m_u;
+};
+
+
+/*
+Some features of the Thumb instruction set are deprecated in ARMv7. Deprecated features affecting
+instructions supported by ARMv7-M are as follows:
+• use of the PC as <Rd> or <Rm> in a 16-bit ADD (SP plus register) instruction
+• use of the SP as <Rm> in a 16-bit ADD (SP plus register) instruction
+• use of the SP as <Rm> in a 16-bit CMP (register) instruction
+• use of MOV (register) instructions in which <Rd> is the SP or PC and <Rm> is also the SP or PC.
+• use of <Rn> as the lowest-numbered register in the register list of a 16-bit STM instruction with base
+register writeback
+*/
+
+class ARMv7Assembler {
+public:
+ typedef ARM::RegisterID RegisterID;
+ typedef ARM::FPRegisterID FPRegisterID;
+
+ // (HS, LO, HI, LS) -> (AE, B, A, BE)
+ // (VS, VC) -> (O, NO)
+ typedef enum {
+ ConditionEQ,
+ ConditionNE,
+ ConditionHS,
+ ConditionLO,
+ ConditionMI,
+ ConditionPL,
+ ConditionVS,
+ ConditionVC,
+ ConditionHI,
+ ConditionLS,
+ ConditionGE,
+ ConditionLT,
+ ConditionGT,
+ ConditionLE,
+ ConditionAL,
+
+ ConditionCS = ConditionHS,
+ ConditionCC = ConditionLO,
+ } Condition;
+
+ class JmpSrc {
+ friend class ARMv7Assembler;
+ friend class ARMInstructionFormatter;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ {
+ }
+
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ {
+ }
+
+ int m_offset;
+ };
+
+ class JmpDst {
+ friend class ARMv7Assembler;
+ friend class ARMInstructionFormatter;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ , m_used(false)
+ {
+ }
+
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ , m_used(false)
+ {
+ ASSERT(m_offset == offset);
+ }
+
+ int m_offset : 31;
+ int m_used : 1;
+ };
+
+private:
+
+ // ARMv7, Appx-A.6.3
+ bool BadReg(RegisterID reg)
+ {
+ return (reg == ARM::sp) || (reg == ARM::pc);
+ }
+
+ bool isSingleRegister(FPRegisterID reg)
+ {
+        // Check that no bits above the low five are set (i.e. the value names one of s0-s31).
+ return !(reg & ~31);
+ }
+
+ bool isDoubleRegister(FPRegisterID reg)
+ {
+ // Check that the high bit isn't set (q16+), and that the low bit isn't (s1, s3, etc).
+ return !(reg & ~(31 << 1));
+ }
+
+ bool isQuadRegister(FPRegisterID reg)
+ {
+ return !(reg & ~(31 << 2));
+ }
+
+ uint32_t singleRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isSingleRegister(reg));
+ return reg;
+ }
+
+ uint32_t doubleRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isDoubleRegister(reg));
+ return reg >> 1;
+ }
+
+ uint32_t quadRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isQuadRegister(reg));
+ return reg >> 2;
+ }
+
+ uint32_t singleRegisterMask(FPRegisterID rd, int highBitsShift, int lowBitShift)
+ {
+ uint32_t rdNum = singleRegisterNum(rd);
+ uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+ if (rdNum & 1)
+ rdMask |= 1 << lowBitShift;
+ return rdMask;
+ }
+
+ uint32_t doubleRegisterMask(FPRegisterID rd, int highBitShift, int lowBitsShift)
+ {
+ uint32_t rdNum = doubleRegisterNum(rd);
+ uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+ if (rdNum & 16)
+ rdMask |= 1 << highBitShift;
+ return rdMask;
+ }
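+
+    // Illustrative example (editorial, not part of the original patch):
+    // doubleRegisterMask(ARM::d17, 6, 28) == 0x10000040 - register number 17 has
+    // its low four bits placed at bit 28 and its fifth bit at bit 6 of the packed
+    // word passed to vfpOp().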
+
+ typedef enum {
+ OP_ADD_reg_T1 = 0x1800,
+ OP_ADD_S_reg_T1 = 0x1800,
+ OP_SUB_reg_T1 = 0x1A00,
+ OP_SUB_S_reg_T1 = 0x1A00,
+ OP_ADD_imm_T1 = 0x1C00,
+ OP_ADD_S_imm_T1 = 0x1C00,
+ OP_SUB_imm_T1 = 0x1E00,
+ OP_SUB_S_imm_T1 = 0x1E00,
+ OP_MOV_imm_T1 = 0x2000,
+ OP_CMP_imm_T1 = 0x2800,
+ OP_ADD_imm_T2 = 0x3000,
+ OP_ADD_S_imm_T2 = 0x3000,
+ OP_SUB_imm_T2 = 0x3800,
+ OP_SUB_S_imm_T2 = 0x3800,
+ OP_AND_reg_T1 = 0x4000,
+ OP_EOR_reg_T1 = 0x4040,
+ OP_TST_reg_T1 = 0x4200,
+ OP_CMP_reg_T1 = 0x4280,
+ OP_ORR_reg_T1 = 0x4300,
+ OP_MVN_reg_T1 = 0x43C0,
+ OP_ADD_reg_T2 = 0x4400,
+ OP_MOV_reg_T1 = 0x4600,
+ OP_BLX = 0x4700,
+ OP_BX = 0x4700,
+ OP_LDRH_reg_T1 = 0x5A00,
+ OP_STR_reg_T1 = 0x5000,
+ OP_LDR_reg_T1 = 0x5800,
+ OP_STR_imm_T1 = 0x6000,
+ OP_LDR_imm_T1 = 0x6800,
+ OP_LDRH_imm_T1 = 0x8800,
+ OP_STR_imm_T2 = 0x9000,
+ OP_LDR_imm_T2 = 0x9800,
+ OP_ADD_SP_imm_T1 = 0xA800,
+ OP_ADD_SP_imm_T2 = 0xB000,
+ OP_SUB_SP_imm_T1 = 0xB080,
+ OP_BKPT = 0xBE00,
+ OP_IT = 0xBF00,
+ } OpcodeID;
+
+ typedef enum {
+ OP_AND_reg_T2 = 0xEA00,
+ OP_TST_reg_T2 = 0xEA10,
+ OP_ORR_reg_T2 = 0xEA40,
+ OP_ASR_imm_T1 = 0xEA4F,
+ OP_LSL_imm_T1 = 0xEA4F,
+ OP_LSR_imm_T1 = 0xEA4F,
+ OP_ROR_imm_T1 = 0xEA4F,
+ OP_MVN_reg_T2 = 0xEA6F,
+ OP_EOR_reg_T2 = 0xEA80,
+ OP_ADD_reg_T3 = 0xEB00,
+ OP_ADD_S_reg_T3 = 0xEB10,
+ OP_SUB_reg_T2 = 0xEBA0,
+ OP_SUB_S_reg_T2 = 0xEBB0,
+ OP_CMP_reg_T2 = 0xEBB0,
+ OP_B_T4a = 0xF000,
+ OP_AND_imm_T1 = 0xF000,
+ OP_TST_imm = 0xF010,
+ OP_ORR_imm_T1 = 0xF040,
+ OP_MOV_imm_T2 = 0xF040,
+ OP_MVN_imm = 0xF060,
+ OP_EOR_imm_T1 = 0xF080,
+ OP_ADD_imm_T3 = 0xF100,
+ OP_ADD_S_imm_T3 = 0xF110,
+ OP_CMN_imm = 0xF110,
+ OP_SUB_imm_T3 = 0xF1A0,
+ OP_SUB_S_imm_T3 = 0xF1B0,
+ OP_CMP_imm_T2 = 0xF1B0,
+ OP_ADD_imm_T4 = 0xF200,
+ OP_MOV_imm_T3 = 0xF240,
+ OP_SUB_imm_T4 = 0xF2A0,
+ OP_MOVT = 0xF2C0,
+ OP_LDRH_reg_T2 = 0xF830,
+ OP_LDRH_imm_T3 = 0xF830,
+ OP_STR_imm_T4 = 0xF840,
+ OP_STR_reg_T2 = 0xF840,
+ OP_LDR_imm_T4 = 0xF850,
+ OP_LDR_reg_T2 = 0xF850,
+ OP_LDRH_imm_T2 = 0xF8B0,
+ OP_STR_imm_T3 = 0xF8C0,
+ OP_LDR_imm_T3 = 0xF8D0,
+ OP_LSL_reg_T2 = 0xFA00,
+ OP_LSR_reg_T2 = 0xFA20,
+ OP_ASR_reg_T2 = 0xFA40,
+ OP_ROR_reg_T2 = 0xFA60,
+ OP_SMULL_T1 = 0xFB80,
+ } OpcodeID1;
+
+ typedef enum {
+ OP_B_T4b = 0x9000,
+ } OpcodeID2;
+
+ struct FourFours {
+ FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+ {
+ m_u.f0 = f0;
+ m_u.f1 = f1;
+ m_u.f2 = f2;
+ m_u.f3 = f3;
+ }
+
+ union {
+ unsigned value;
+ struct {
+ unsigned f0 : 4;
+ unsigned f1 : 4;
+ unsigned f2 : 4;
+ unsigned f3 : 4;
+ };
+ } m_u;
+ };
+
+ class ARMInstructionFormatter;
+
+ // false means else!
+ bool ifThenElseConditionBit(Condition condition, bool isIf)
+ {
+ return isIf ? (condition & 1) : !(condition & 1);
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | (ifThenElseConditionBit(condition, inst4if) << 1)
+ | 1;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | 2;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | 4;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+
+ uint8_t ifThenElse(Condition condition)
+ {
+ int mask = 8;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
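+
+    // Illustrative examples (editorial, not part of the original patch):
+    // ifThenElse(ConditionNE) yields 0x18 (IT NE, mask 0b1000, one conditional
+    // instruction) and ifThenElse(ConditionEQ, false) yields 0x0c (ITE EQ, mask 0b1100).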
+
+public:
+
+ void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isValid());
+
+ if (rn == ARM::sp) {
+ if (!(rd & 8) && imm.isUInt10()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2);
+ return;
+ } else if ((rd == ARM::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2);
+ return;
+ }
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+ }
+ }
+
+ void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, add doesn't modify the flags register.
+ void add(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (rd == rn)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+ else if (rd == rm)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+ else if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_S_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_reg_T1, rm, rn, rd);
+ else
+ add_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+ else
+ ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc b()
+ {
+ m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+ return JmpSrc(m_formatter.size());
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc blx(RegisterID rm)
+ {
+ ASSERT(rm != ARM::pc);
+ m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
+ return JmpSrc(m_formatter.size());
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc bx(RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return JmpSrc(m_formatter.size());
+ }
+
+ void bkpt(uint8_t imm=0)
+ {
+ m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+ }
+
+ void cmn(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ void cmp(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!(rn & 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+ }
+
+ void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ void cmp(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ cmp(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+ }
+
+    // What is wrong with you people? xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+    // What is wrong with you people? xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+ else
+ eor(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void it(Condition cond)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+ }
+
+ void it(Condition cond, bool inst2if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+ }
+
+ void it(Condition cond, bool inst2if, bool inst3if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+ }
+
+ void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARM::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARM::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then this is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+    // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(rn != ARM::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+        ASSERT(rn != ARM::pc); // LDRH (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then this is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+    // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+ }
+
+ void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(!BadReg(rt)); // Memory hint
+ ASSERT(rn != ARM::pc); // LDRH (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void movT3(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+ }
+
+ void mov(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!BadReg(rd));
+
+ if ((rd < 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+ else if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+ else
+ movT3(rd, imm);
+ }
+
+ void mov(RegisterID rd, RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+ }
+
+ void movt(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isUInt16());
+ ASSERT(!BadReg(rd));
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+ }
+
+ void mvn(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+ }
+
+ void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void mvn(RegisterID rd, RegisterID rm)
+ {
+ if (!((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+ else
+ mvn(rd, rm, ShiftTypeAndAmount());
+ }
+
+ void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rdLo));
+ ASSERT(!BadReg(rdHi));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ ASSERT(rdLo != rdHi);
+ m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARM::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then this is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARM::sp) && (rd == ARM::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+ }
+ }
+
+ void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+    // NOTE: In an IT block, sub doesn't modify the flags register.
+ void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARM::sp) && (rd == ARM::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_S_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_reg_T1, rm, rn, rd);
+ else
+ sub_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void tst(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ void tst(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ tst(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+ }
+
+ void vadd_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b00ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vcmp_F64(FPRegisterID rd, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0bc0eeb4 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vcvt_F64_S32(FPRegisterID fd, FPRegisterID sm)
+ {
+ m_formatter.vfpOp(0x0bc0eeb8 | doubleRegisterMask(fd, 6, 28) | singleRegisterMask(sm, 16, 21));
+ }
+
+ void vcvt_S32_F64(FPRegisterID sd, FPRegisterID fm)
+ {
+ m_formatter.vfpOp(0x0bc0eebd | singleRegisterMask(sd, 28, 6) | doubleRegisterMask(fm, 21, 16));
+ }
+
+ void vldr(FPRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ vmem(rd, rn, imm, true);
+ }
+
+ void vmov(RegisterID rd, FPRegisterID sn)
+ {
+ m_formatter.vfpOp(0x0a10ee10 | (rd << 28) | singleRegisterMask(sn, 0, 23));
+ }
+
+ void vmov(FPRegisterID sn, RegisterID rd)
+ {
+ m_formatter.vfpOp(0x0a10ee00 | (rd << 28) | singleRegisterMask(sn, 0, 23));
+ }
+
+ // move FPSCR flags to APSR.
+ void vmrs_APSR_nzcv_FPSCR()
+ {
+ m_formatter.vfpOp(0xfa10eef1);
+ }
+
+ void vmul_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b00ee20 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vstr(FPRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ vmem(rd, rn, imm, false);
+ }
+
+ void vsub_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b40ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+
+ JmpDst label()
+ {
+ return JmpDst(m_formatter.size());
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ ASSERT(jump.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst destination)
+ {
+ ASSERT(destination.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ // Assembler admin methods:
+
+ size_t size() const
+ {
+ return m_formatter.size();
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ void* copy = m_formatter.executableCopy(allocator);
+ ASSERT(copy);
+ return copy;
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ ASSERT(call.m_offset >= 0);
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
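+    //
+    // Editorial sketch (illustrative only): the PatchBuffer calls linkJump()/
+    // linkCall() while the code still lives in the writable AssemblerBuffer;
+    // once the code has been copied into an ExecutablePool, relinkJump(),
+    // relinkCall() and the repatch* methods are used instead, since they
+    // unprotect the target memory themselves via ExecutableAllocator::MakeWritable.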
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ ASSERT(to.m_offset != -1);
+ ASSERT(from.m_offset != -1);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(m_formatter.data()) + from.m_offset);
+ intptr_t relative = to.m_offset - from.m_offset;
+
+ linkWithOffset(location, relative);
+ }
+
+ static void linkJump(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(from.m_offset != -1);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(location);
+
+ linkWithOffset(location, relative);
+ }
+
+    // bah, this method should really be static, since it is used by the PatchBuffer.
+ // return a bool saying whether the link was successful?
+ static void linkCall(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+ ASSERT(from.m_offset != -1);
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ patchPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
+ }
+
+ static void patchPointer(void* code, JmpDst where, void* value)
+ {
+ patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
+
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ linkWithOffset(reinterpret_cast<uint16_t*>(from), relative);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
+
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ patchPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
+
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ patchInt32(where, value);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
+
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ patchPointer(where, value);
+ }
+
+ static void repatchLoadPtrToLEA(void* where)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
+ ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
+
+ ExecutableAllocator::MakeWritable unprotect(loadOp, sizeof(uint16_t));
+ *loadOp = OP_ADD_reg_T3 | (*loadOp & 0xf);
+ }
+
+private:
+
+    // ARM vfp addresses can be offset by an 8-bit immediate, left shifted by 2, plus a separate up/down bit
+    // (i.e. +/-(0..255) 32-bit words).
+ void vmem(FPRegisterID rd, RegisterID rn, int32_t imm, bool isLoad)
+ {
+ bool up;
+ uint32_t offset;
+ if (imm < 0) {
+ offset = -imm;
+ up = false;
+ } else {
+ offset = imm;
+ up = true;
+ }
+
+        // offset is effectively left shifted by 2 already (the bottom two bits are zero, and not
+        // represented in the instruction). Left shift by 14 to move it into position 0x00AA0000.
+ ASSERT((offset & ~(0xff << 2)) == 0);
+ offset <<= 14;
+
+ m_formatter.vfpOp(0x0b00ed00 | offset | (up << 7) | (isLoad << 4) | doubleRegisterMask(rd, 6, 28) | rn);
+ }
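+
+    // Illustrative example (editorial, not part of the original patch): for
+    // vldr(ARM::d0, ARM::r4, 8) the byte offset 8 becomes the word count 2 in the
+    // imm8 field ((8 << 14) == (8 >> 2) << 16), with the up bit set since the
+    // offset is positive.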
+
+ static void patchInt32(void* code, uint32_t value)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+
+ ExecutableAllocator::MakeWritable unprotect(location - 4, 4 * sizeof(uint16_t));
+
+ uint16_t lo16 = value;
+ uint16_t hi16 = value >> 16;
+
+ spliceHi5(location - 4, lo16);
+ spliceLo11(location - 3, lo16);
+ spliceHi5(location - 2, hi16);
+ spliceLo11(location - 1, hi16);
+ }
+
+ static void patchPointer(void* code, void* value)
+ {
+ patchInt32(code, reinterpret_cast<uint32_t>(value));
+ }
+
+ // Linking & patching:
+ // This method assumes that the JmpSrc being linked is a T4 b instruction.
+ static void linkWithOffset(uint16_t* instruction, intptr_t relative)
+ {
+        // Currently branches further than +/-16MB are not supported (and will hit the CRASH below).
+ if (((relative << 7) >> 7) != relative) {
+ // FIXME: This CRASH means we cannot turn the JIT on by default on arm-v7.
+ fprintf(stderr, "Error: Cannot link T4b.\n");
+ CRASH();
+ }
+
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+
+ int word1 = ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ int word2 = ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+
+ instruction[-2] = OP_B_T4a | word1;
+ instruction[-1] = OP_B_T4b | word2;
+ }
+
+ // These functions can be used to splice 16-bit immediates back into previously generated instructions.
+ static void spliceHi5(uint16_t* where, uint16_t what)
+ {
+ uint16_t pattern = (what >> 12) | ((what & 0x0800) >> 1);
+ *where = (*where & 0xFBF0) | pattern;
+ }
+ static void spliceLo11(uint16_t* where, uint16_t what)
+ {
+ uint16_t pattern = ((what & 0x0700) << 4) | (what & 0x00FF);
+ *where = (*where & 0x8F00) | pattern;
+ }
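+
+    // Illustrative example (editorial, not part of the original patch): splicing
+    // the 16-bit value 0xbeef back into a MOVW/MOVT pair writes imm4 = 0xb and
+    // i = 1 into the first halfword (spliceHi5) and imm3 = 0b110, imm8 = 0xef
+    // into the second (spliceLo11).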
+
+ class ARMInstructionFormatter {
+ public:
+ void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+ {
+ m_buffer.putShort(op | (rd << 8) | imm);
+ }
+
+ void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+ }
+
+ void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+ {
+ m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+ }
+
+ void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+ }
+ void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (reg1 << 3) | reg2);
+ }
+
+ void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+ {
+ m_buffer.putShort(op | reg);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+ {
+ m_buffer.putShort(op);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(op2);
+ }
+
+ void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+ {
+ m_buffer.putShort(op | (imm.m_value.i << 10) | imm4);
+ m_buffer.putShort((imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8);
+ }
+
+ void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((reg2 << 12) | imm);
+ }
+
+ void vfpOp(int32_t op)
+ {
+ m_buffer.putInt(op);
+ }
+
+
+ // Administrative methods:
+
+ size_t size() const { return m_buffer.size(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+ void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+
+ private:
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_V7)
+
+#endif // ARMAssembler_h
diff --git a/JavaScriptCore/assembler/AbstractMacroAssembler.h b/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 851b6d5..7460029 100644
--- a/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -28,22 +28,38 @@
#include <wtf/Platform.h>
+#include <MacroAssemblerCodeRef.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/UnusedParam.h>
+
#if ENABLE(ASSEMBLER)
+// FIXME: keep transitioning this out into MacroAssemblerX86_64.
+#if PLATFORM(X86_64)
+#define REPTACH_OFFSET_CALL_R11 3
+#endif
+
namespace JSC {
template <class AssemblerType>
class AbstractMacroAssembler {
public:
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssemblerCodeRef CodeRef;
+
class Jump;
class PatchBuffer;
+ class CodeLocationInstruction;
class CodeLocationLabel;
class CodeLocationJump;
class CodeLocationCall;
+ class CodeLocationNearCall;
class CodeLocationDataLabel32;
class CodeLocationDataLabelPtr;
+ class ProcessorReturnAddress;
typedef typename AssemblerType::RegisterID RegisterID;
+ typedef typename AssemblerType::FPRegisterID FPRegisterID;
typedef typename AssemblerType::JmpSrc JmpSrc;
typedef typename AssemblerType::JmpDst JmpDst;
@@ -165,17 +181,32 @@ public:
struct Imm32 {
explicit Imm32(int32_t value)
: m_value(value)
+#if PLATFORM(ARM_V7)
+ , m_isPointer(false)
+#endif
{
}
#if !PLATFORM(X86_64)
explicit Imm32(ImmPtr ptr)
: m_value(ptr.asIntptr())
+#if PLATFORM(ARM_V7)
+ , m_isPointer(true)
+#endif
{
}
#endif
int32_t m_value;
+#if PLATFORM(ARM_V7)
+ // We rely on being able to regenerate code to recover exception handling
+ // information. Since ARMv7 supports 16-bit immediates there is a danger
+ // that if pointer values change the layout of the generated code will change.
+ // To avoid this problem, always generate pointers (and thus Imm32s constructed
+ // from ImmPtrs) with a code sequence that is able to represent any pointer
+ // value - don't use a more compact form in these cases.
+ bool m_isPointer;
+#endif
};
@@ -192,10 +223,12 @@ public:
// A Label records a point in the generated instruction stream, typically such that
// it may be used as a destination for a jump.
class Label {
- friend class Jump;
- template<class AssemblerType_T>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
+ friend class Jump;
+ friend class MacroAssemblerCodeRef;
friend class PatchBuffer;
+
public:
Label()
{
@@ -217,7 +250,7 @@ public:
// A DataLabelPtr is used to refer to a location in the code containing a pointer to be
// patched after the code has been generated.
class DataLabelPtr {
- template<class AssemblerType_T>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class PatchBuffer;
public:
@@ -239,7 +272,7 @@ public:
// A DataLabelPtr is used to refer to a location in the code containing a pointer to be
// patched after the code has been generated.
class DataLabel32 {
- template<class AssemblerType_T>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class PatchBuffer;
public:
@@ -263,9 +296,9 @@ public:
// relative offset such that when executed it will call to the desired
// destination.
class Call {
- friend class PatchBuffer;
- template<class AssemblerType_T>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
+ friend class PatchBuffer;
public:
enum Flags {
None = 0x0,
@@ -307,10 +340,10 @@ public:
// relative offset such that when executed it will jump to the desired
// destination.
class Jump {
- friend class PatchBuffer;
- template<class AssemblerType_T>
+ template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class Call;
+ friend class PatchBuffer;
public:
Jump()
{
@@ -397,7 +430,6 @@ public:
class CodeLocationCommon {
public:
CodeLocationCommon()
- : m_location(0)
{
}
@@ -406,22 +438,52 @@ public:
// and the labels will always be a fixed distance apart, these
    // methods may be used to recover a handle that has not been
// retained, based on a known fixed relative offset from one that has.
+ CodeLocationInstruction instructionAtOffset(int offset);
CodeLocationLabel labelAtOffset(int offset);
CodeLocationJump jumpAtOffset(int offset);
CodeLocationCall callAtOffset(int offset);
+ CodeLocationNearCall nearCallAtOffset(int offset);
CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
- operator bool() { return m_location; }
- void reset() { m_location = 0; }
-
protected:
- explicit CodeLocationCommon(void* location)
+ explicit CodeLocationCommon(CodePtr location)
: m_location(location)
{
}
- void* m_location;
+ void* dataLocation() { return m_location.dataLocation(); }
+ void* executableAddress() { return m_location.executableAddress(); }
+
+ void reset()
+ {
+ m_location = CodePtr();
+ }
+
+ private:
+ CodePtr m_location;
+ };
+
+ // CodeLocationInstruction:
+ //
+ // An arbitrary instruction in the JIT code.
+ class CodeLocationInstruction : public CodeLocationCommon {
+ friend class CodeLocationCommon;
+ public:
+ CodeLocationInstruction()
+ {
+ }
+
+ void repatchLoadPtrToLEA()
+ {
+ AssemblerType::repatchLoadPtrToLEA(this->dataLocation());
+ }
+
+ private:
+ explicit CodeLocationInstruction(void* location)
+ : CodeLocationCommon(CodePtr(location))
+ {
+ }
};
// CodeLocationLabel:
@@ -430,23 +492,42 @@ public:
class CodeLocationLabel : public CodeLocationCommon {
friend class CodeLocationCommon;
friend class CodeLocationJump;
+ friend class CodeLocationCall;
+ friend class CodeLocationNearCall;
friend class PatchBuffer;
+ friend class ProcessorReturnAddress;
+
public:
CodeLocationLabel()
{
}
- void* addressForSwitch() { return this->m_location; }
- void* addressForExceptionHandler() { return this->m_location; }
- void* addressForJSR() { return this->m_location; }
+ void* addressForSwitch() { return this->executableAddress(); }
+ void* addressForExceptionHandler() { return this->executableAddress(); }
+ void* addressForJSR() { return this->executableAddress(); }
+
+ bool operator!()
+ {
+ return !this->executableAddress();
+ }
+
+ void reset()
+ {
+ CodeLocationCommon::reset();
+ }
private:
- explicit CodeLocationLabel(void* location)
+ explicit CodeLocationLabel(CodePtr location)
: CodeLocationCommon(location)
{
}
- void* getJumpDestination() { return this->m_location; }
+ explicit CodeLocationLabel(void* location)
+ : CodeLocationCommon(CodePtr(location))
+ {
+ }
+
+ void* getJumpDestination() { return this->executableAddress(); }
};
// CodeLocationJump:
@@ -462,12 +543,12 @@ public:
void relink(CodeLocationLabel destination)
{
- AssemblerType::patchJump(reinterpret_cast<intptr_t>(this->m_location), destination.m_location);
+ AssemblerType::relinkJump(this->dataLocation(), destination.dataLocation());
}
private:
explicit CodeLocationJump(void* location)
- : CodeLocationCommon(location)
+ : CodeLocationCommon(CodePtr(location))
{
}
};
@@ -478,29 +559,47 @@ public:
class CodeLocationCall : public CodeLocationCommon {
friend class CodeLocationCommon;
friend class PatchBuffer;
+ friend class ProcessorReturnAddress;
public:
CodeLocationCall()
{
}
- template<typename FunctionSig>
- void relink(FunctionSig* function)
+ void relink(CodeLocationLabel destination)
{
- AssemblerType::patchMacroAssemblerCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(function));
+#if PLATFORM(X86_64)
+ CodeLocationCommon::dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).repatch(destination.executableAddress());
+#else
+ AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
+#endif
+ }
+
+ void relink(FunctionPtr destination)
+ {
+#if PLATFORM(X86_64)
+ CodeLocationCommon::dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).repatch(destination.executableAddress());
+#else
+ AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
+#endif
}
 // This method returns the value that will be set as the return address
// within a function that has been called from this call instruction.
void* calleeReturnAddressValue()
{
- return this->m_location;
+ return this->executableAddress();
}
private:
- explicit CodeLocationCall(void* location)
+ explicit CodeLocationCall(CodePtr location)
: CodeLocationCommon(location)
{
}
+
+ explicit CodeLocationCall(void* location)
+ : CodeLocationCommon(CodePtr(location))
+ {
+ }
};
// CodeLocationNearCall:
@@ -509,29 +608,44 @@ public:
class CodeLocationNearCall : public CodeLocationCommon {
friend class CodeLocationCommon;
friend class PatchBuffer;
+ friend class ProcessorReturnAddress;
public:
CodeLocationNearCall()
{
}
- template<typename FunctionSig>
- void relink(FunctionSig* function)
+ void relink(CodePtr destination)
+ {
+ AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
+ }
+
+ void relink(CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
+ }
+
+ void relink(FunctionPtr destination)
{
- AssemblerType::patchCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(function));
+ AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
}
 // This method returns the value that will be set as the return address
// within a function that has been called from this call instruction.
void* calleeReturnAddressValue()
{
- return this->m_location;
+ return this->executableAddress();
}
private:
- explicit CodeLocationNearCall(void* location)
+ explicit CodeLocationNearCall(CodePtr location)
: CodeLocationCommon(location)
{
}
+
+ explicit CodeLocationNearCall(void* location)
+ : CodeLocationCommon(CodePtr(location))
+ {
+ }
};
// CodeLocationDataLabel32:
@@ -547,12 +661,12 @@ public:
void repatch(int32_t value)
{
- AssemblerType::patchImmediate(reinterpret_cast<intptr_t>(this->m_location), value);
+ AssemblerType::repatchInt32(this->dataLocation(), value);
}
private:
explicit CodeLocationDataLabel32(void* location)
- : CodeLocationCommon(location)
+ : CodeLocationCommon(CodePtr(location))
{
}
};
@@ -570,12 +684,12 @@ public:
void repatch(void* value)
{
- AssemblerType::patchPointer(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<intptr_t>(value));
+ AssemblerType::repatchPointer(this->dataLocation(), value);
}
private:
explicit CodeLocationDataLabelPtr(void* location)
- : CodeLocationCommon(location)
+ : CodeLocationCommon(CodePtr(location))
{
}
};
@@ -584,36 +698,55 @@ public:
//
// This class can be used to relink a call identified by its return address.
class ProcessorReturnAddress {
+ friend class CodeLocationCall;
+ friend class CodeLocationNearCall;
public:
ProcessorReturnAddress(void* location)
: m_location(location)
{
}
- template<typename FunctionSig>
- void relinkCallerToFunction(FunctionSig* newCalleeFunction)
+ void relinkCallerToTrampoline(CodeLocationLabel label)
+ {
+ CodeLocationCall(CodePtr(m_location)).relink(label);
+ }
+
+ void relinkCallerToTrampoline(CodePtr newCalleeFunction)
+ {
+ relinkCallerToTrampoline(CodeLocationLabel(newCalleeFunction));
+ }
+
+ void relinkCallerToFunction(FunctionPtr function)
{
- AssemblerType::patchMacroAssemblerCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(newCalleeFunction));
+ CodeLocationCall(CodePtr(m_location)).relink(function);
}
- template<typename FunctionSig>
- void relinkNearCallerToFunction(FunctionSig* newCalleeFunction)
+ void relinkNearCallerToTrampoline(CodeLocationLabel label)
{
- AssemblerType::patchCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(newCalleeFunction));
+ CodeLocationNearCall(CodePtr(m_location)).relink(label);
}
- operator void*()
+ void relinkNearCallerToTrampoline(CodePtr newCalleeFunction)
+ {
+ relinkNearCallerToTrampoline(CodeLocationLabel(newCalleeFunction));
+ }
+
+ void* addressForLookup()
{
- return m_location;
+ return m_location.value();
}
private:
- void* m_location;
+ ReturnAddressPtr m_location;
};
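 // Usage sketch (illustrative only; 'returnAddress' would be captured by a stub's
 // trampoline, and 'cti_new_target' stands in for the C function to be reached):
 //     ProcessorReturnAddress caller(returnAddress);
 //     caller.relinkCallerToFunction(FunctionPtr(cti_new_target));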
- // Section 4: The patch buffer - utility to finalize code generation.
+ // Section 4: PatchBuffer - utility to finalize code generation.
+ static CodePtr trampolineAt(CodeRef ref, Label label)
+ {
+ return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
+ }
// PatchBuffer:
//
@@ -632,69 +765,59 @@ public:
// FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
// address of calls, as opposed to a point that can be used to later relink a Jump -
 // possibly wrap the latter up in an object that can do just that).
- class PatchBuffer {
+ class PatchBuffer : public Noncopyable {
public:
- PatchBuffer(void* code)
- : m_code(code)
+ // Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
+ // First, executablePool is copied into m_executablePool, then the initialization of
+    // m_code uses m_executablePool, *not* executablePool, which by that point is no longer valid.
+ PatchBuffer(AbstractMacroAssembler<AssemblerType>* masm, PassRefPtr<ExecutablePool> executablePool)
+ : m_executablePool(executablePool)
+ , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
+ , m_size(masm->m_assembler.size())
+#ifndef NDEBUG
+ , m_completed(false)
+#endif
{
}
- CodeLocationLabel entry()
+ ~PatchBuffer()
{
- return CodeLocationLabel(m_code);
+ ASSERT(m_completed);
}
- void* trampolineAt(Label label)
- {
- return AssemblerType::getRelocatedAddress(m_code, label.m_label);
- }
-
// These methods are used to link or set values at code generation time.
- template<typename FunctionSig>
- void link(Call call, FunctionSig* function)
+ void link(Call call, FunctionPtr function)
{
ASSERT(call.isFlagSet(Call::Linkable));
#if PLATFORM(X86_64)
- if (call.isFlagSet(Call::Near)) {
- AssemblerType::linkCall(m_code, call.m_jmp, reinterpret_cast<void*>(function));
- } else {
- intptr_t callLocation = reinterpret_cast<intptr_t>(AssemblerType::getRelocatedAddress(m_code, call.m_jmp));
- AssemblerType::patchMacroAssemblerCall(callLocation, reinterpret_cast<void*>(function));
- }
-#else
- AssemblerType::linkCall(m_code, call.m_jmp, reinterpret_cast<void*>(function));
+ if (!call.isFlagSet(Call::Near)) {
+ char* callLocation = reinterpret_cast<char*>(AssemblerType::getRelocatedAddress(code(), call.m_jmp)) - REPTACH_OFFSET_CALL_R11;
+ AssemblerType::patchPointerForCall(callLocation, function.value());
+ } else
#endif
+ AssemblerType::linkCall(code(), call.m_jmp, function.value());
}
- template<typename FunctionSig>
- void linkTailRecursive(Jump jump, FunctionSig* function)
- {
- AssemblerType::linkJump(m_code, jump.m_jmp, reinterpret_cast<void*>(function));
- }
-
- template<typename FunctionSig>
- void linkTailRecursive(JumpList list, FunctionSig* function)
- {
- for (unsigned i = 0; i < list.m_jumps.size(); ++i) {
- AssemblerType::linkJump(m_code, list.m_jumps[i].m_jmp, reinterpret_cast<void*>(function));
- }
- }
-
void link(Jump jump, CodeLocationLabel label)
{
- AssemblerType::linkJump(m_code, jump.m_jmp, label.m_location);
+ AssemblerType::linkJump(code(), jump.m_jmp, label.dataLocation());
}
void link(JumpList list, CodeLocationLabel label)
{
for (unsigned i = 0; i < list.m_jumps.size(); ++i)
- AssemblerType::linkJump(m_code, list.m_jumps[i].m_jmp, label.m_location);
+ AssemblerType::linkJump(code(), list.m_jumps[i].m_jmp, label.dataLocation());
}
void patch(DataLabelPtr label, void* value)
{
- AssemblerType::patchAddress(m_code, label.m_label, value);
+ AssemblerType::patchPointer(code(), label.m_label, value);
+ }
+
+ void patch(DataLabelPtr label, CodeLocationLabel value)
+ {
+ AssemblerType::patchPointer(code(), label.m_label, value.getJumpDestination());
}
// These methods are used to obtain handles to allow the code to be relinked / repatched later.
@@ -703,29 +826,29 @@ public:
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(!call.isFlagSet(Call::Near));
- return CodeLocationCall(AssemblerType::getRelocatedAddress(m_code, call.m_jmp));
+ return CodeLocationCall(AssemblerType::getRelocatedAddress(code(), call.m_jmp));
}
CodeLocationNearCall locationOfNearCall(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(call.isFlagSet(Call::Near));
- return CodeLocationNearCall(AssemblerType::getRelocatedAddress(m_code, call.m_jmp));
+ return CodeLocationNearCall(AssemblerType::getRelocatedAddress(code(), call.m_jmp));
}
CodeLocationLabel locationOf(Label label)
{
- return CodeLocationLabel(AssemblerType::getRelocatedAddress(m_code, label.m_label));
+ return CodeLocationLabel(AssemblerType::getRelocatedAddress(code(), label.m_label));
}
CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
{
- return CodeLocationDataLabelPtr(AssemblerType::getRelocatedAddress(m_code, label.m_label));
+ return CodeLocationDataLabelPtr(AssemblerType::getRelocatedAddress(code(), label.m_label));
}
CodeLocationDataLabel32 locationOf(DataLabel32 label)
{
- return CodeLocationDataLabel32(AssemblerType::getRelocatedAddress(m_code, label.m_label));
+ return CodeLocationDataLabel32(AssemblerType::getRelocatedAddress(code(), label.m_label));
}
// This method obtains the return address of the call, given as an offset from
@@ -735,8 +858,47 @@ public:
return AssemblerType::getCallReturnOffset(call.m_jmp);
}
+ // Upon completion of all patching either 'finalizeCode()' or 'finalizeCodeAddendum()' should be called
+ // once to complete generation of the code. 'finalizeCode()' is suited to situations
+    // where the executable pool must also be retained; the lighter-weight 'finalizeCodeAddendum()' is
+ // suited to adding to an existing allocation.
+ CodeRef finalizeCode()
+ {
+ performFinalization();
+
+ return CodeRef(m_code, m_executablePool, m_size);
+ }
+ CodeLocationLabel finalizeCodeAddendum()
+ {
+ performFinalization();
+
+ return CodeLocationLabel(code());
+ }
+
private:
+ // Keep this private! - the underlying code should only be obtained externally via
+ // finalizeCode() or finalizeCodeAddendum().
+ void* code()
+ {
+ return m_code;
+ }
+
+ void performFinalization()
+ {
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ m_completed = true;
+#endif
+
+ ExecutableAllocator::makeExecutable(code(), m_size);
+ }
+
+ RefPtr<ExecutablePool> m_executablePool;
void* m_code;
+ size_t m_size;
+#ifndef NDEBUG
+ bool m_completed;
+#endif
};
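 // Putting the pieces together, a typical (illustrative) use of PatchBuffer might be:
 //     MacroAssembler masm;
 //     // ... emit code, recording 'stubCall' (a Call) along the way ...
 //     MacroAssembler::PatchBuffer patchBuffer(&masm, executablePool); // copies the code
 //     patchBuffer.link(stubCall, FunctionPtr(cti_assumed_stub));      // resolve the call
 //     MacroAssembler::CodeRef code = patchBuffer.finalizeCode();      // mark executable
 // Here 'executablePool', 'stubCall' and 'cti_assumed_stub' are assumed names.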
@@ -747,11 +909,6 @@ public:
return m_assembler.size();
}
- void* copyCode(ExecutablePool* allocator)
- {
- return m_assembler.executableCopy(allocator);
- }
-
Label label()
{
return Label(this);
@@ -793,6 +950,11 @@ public:
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
}
+ ptrdiff_t differenceBetween(DataLabelPtr from, DataLabelPtr to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
ptrdiff_t differenceBetween(DataLabelPtr from, Call to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
@@ -804,36 +966,47 @@ protected:
template <class AssemblerType>
+typename AbstractMacroAssembler<AssemblerType>::CodeLocationInstruction AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::instructionAtOffset(int offset)
+{
+ return typename AbstractMacroAssembler::CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+template <class AssemblerType>
typename AbstractMacroAssembler<AssemblerType>::CodeLocationLabel AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::labelAtOffset(int offset)
{
- return typename AbstractMacroAssembler::CodeLocationLabel(reinterpret_cast<char*>(m_location) + offset);
+ return typename AbstractMacroAssembler::CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
}
template <class AssemblerType>
typename AbstractMacroAssembler<AssemblerType>::CodeLocationJump AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::jumpAtOffset(int offset)
{
- return typename AbstractMacroAssembler::CodeLocationJump(reinterpret_cast<char*>(m_location) + offset);
+ return typename AbstractMacroAssembler::CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
}
template <class AssemblerType>
typename AbstractMacroAssembler<AssemblerType>::CodeLocationCall AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::callAtOffset(int offset)
{
- return typename AbstractMacroAssembler::CodeLocationCall(reinterpret_cast<char*>(m_location) + offset);
+ return typename AbstractMacroAssembler::CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+template <class AssemblerType>
+typename AbstractMacroAssembler<AssemblerType>::CodeLocationNearCall AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::nearCallAtOffset(int offset)
+{
+ return typename AbstractMacroAssembler::CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
}
template <class AssemblerType>
typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabelPtr AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabelPtrAtOffset(int offset)
{
- return typename AbstractMacroAssembler::CodeLocationDataLabelPtr(reinterpret_cast<char*>(m_location) + offset);
+ return typename AbstractMacroAssembler::CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
}
template <class AssemblerType>
typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabel32 AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabel32AtOffset(int offset)
{
- return typename AbstractMacroAssembler::CodeLocationDataLabel32(reinterpret_cast<char*>(m_location) + offset);
+ return typename AbstractMacroAssembler::CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
}
-
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
diff --git a/JavaScriptCore/assembler/AssemblerBuffer.h b/JavaScriptCore/assembler/AssemblerBuffer.h
index e1f53d8..7a5a8d3 100644
--- a/JavaScriptCore/assembler/AssemblerBuffer.h
+++ b/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -132,6 +132,8 @@ namespace JSC {
if (!result)
return 0;
+ ExecutableAllocator::makeWritable(result, m_size);
+
return memcpy(result, m_buffer, m_size);
}
diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h
index 71ac1f6..c9749a0 100644
--- a/JavaScriptCore/assembler/MacroAssembler.h
+++ b/JavaScriptCore/assembler/MacroAssembler.h
@@ -30,7 +30,11 @@
#if ENABLE(ASSEMBLER)
-#if PLATFORM(X86)
+#if PLATFORM(ARM_V7)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif PLATFORM(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
@@ -242,6 +246,11 @@ public:
store32(src, address);
}
+ void storePtr(RegisterID src, void* address)
+ {
+ store32(src, address);
+ }
+
void storePtr(ImmPtr imm, ImplicitAddress address)
{
store32(Imm32(imm), address);
diff --git a/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
new file mode 100644
index 0000000..5ccbd43
--- /dev/null
+++ b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -0,0 +1,1063 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARMv7_h
+#define MacroAssemblerARMv7_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARMv7Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
+ // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
+ // - dTR is likely used more than aTR, and we'll get better instruction
+ // encoding if it's in the low 8 registers.
+ static const ARM::RegisterID dataTempRegister = ARM::ip;
+ static const RegisterID addressTempRegister = ARM::r3;
+ static const FPRegisterID fpTempRegister = ARM::d7;
+
+ struct ArmAddress {
+ enum AddressType {
+ HasOffset,
+ HasIndex,
+ } type;
+ RegisterID base;
+ union {
+ int32_t offset;
+ struct {
+ RegisterID index;
+ Scale scale;
+ };
+ } u;
+
+ explicit ArmAddress(RegisterID base, int32_t offset = 0)
+ : type(HasOffset)
+ , base(base)
+ {
+ u.offset = offset;
+ }
+
+ explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
+ : type(HasIndex)
+ , base(base)
+ {
+ u.index = index;
+ u.scale = scale;
+ }
+ };
+
+public:
+
+ static const Scale ScalePtr = TimesFour;
+
+ enum Condition {
+ Equal = ARMv7Assembler::ConditionEQ,
+ NotEqual = ARMv7Assembler::ConditionNE,
+ Above = ARMv7Assembler::ConditionHI,
+ AboveOrEqual = ARMv7Assembler::ConditionHS,
+ Below = ARMv7Assembler::ConditionLO,
+ BelowOrEqual = ARMv7Assembler::ConditionLS,
+ GreaterThan = ARMv7Assembler::ConditionGT,
+ GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ LessThan = ARMv7Assembler::ConditionLT,
+ LessThanOrEqual = ARMv7Assembler::ConditionLE,
+ Overflow = ARMv7Assembler::ConditionVS,
+ Signed = ARMv7Assembler::ConditionMI,
+ Zero = ARMv7Assembler::ConditionEQ,
+ NonZero = ARMv7Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ DoubleEqual = ARMv7Assembler::ConditionEQ,
+ DoubleGreaterThan = ARMv7Assembler::ConditionGT,
+ DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ DoubleLessThan = ARMv7Assembler::ConditionLO,
+ DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+ };
+
+ static const RegisterID stackPointerRegister = ARM::sp;
+ static const RegisterID linkRegister = ARM::lr;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+ // For many operations the source may be an Imm32, the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+ // object).
+
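+    // For example (illustrative register names, not defined in this file):
+    //     add32(Imm32(4), regT0);             // regT0 += 4 (srcDst form)
+    //     add32(Address(regT1, 8), regT0);    // regT0 += *(regT1 + 8)
+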
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.add(dest, dest, src);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add(dest, src, dataTempRegister);
+ }
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.ARM_and(dest, dest, src);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.ARM_and(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.ARM_and(dest, dest, dataTempRegister);
+ }
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl(dest, dest, imm.m_value);
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.lsl(dest, dest, shift_amount);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ }
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ }
+
+ void not32(RegisterID srcDest)
+ {
+ m_assembler.mvn(srcDest, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr(dest, dest, src);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.orr(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.orr(dest, dest, dataTempRegister);
+ }
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.asr(dest, dest, shift_amount);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.asr(dest, dest, imm.m_value);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sub(dest, dest, src);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub(dest, dest, dataTempRegister);
+ }
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.eor(dest, dest, src);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.eor(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.eor(dest, dest, dataTempRegister);
+ }
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be an Imm32. Address
+    // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
+
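+    // For example (illustrative operands):
+    //     load32(Address(base, 16), dest);                      // dest = *(base + 16)
+    //     store32(Imm32(0), Address(base, 16));                 // immediate source
+    //     store32(dest, BaseIndex(base, index, TimesFour));     // scaled index form
+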
+private:
+ void load32(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldr(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldr(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrh(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void store32(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.str(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.str(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.str(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+public:
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(void* address, RegisterID dest)
+ {
+ move(ImmPtr(address), addressTempRegister);
+ m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ moveFixedWidthEncoding(Imm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
+ store32(src, ArmAddress(address.base, dataTempRegister));
+ return label;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ move(ImmPtr(address), addressTempRegister);
+ m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+
+ // Floating-point operations:
+
+ bool supportsFloatingPoint() const { return true; }
+ // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
+ // If a value is not representable as an integer, and possibly for some values that are,
+ // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
+ // a branch will be taken. It is not clear whether this interface will be well suited to
+ // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
+    // failure values (saturates to INT_MIN & INT_MAX, NaN results in a value of 0). This is a
+ // temporary solution while we work out what this interface should be. Either we need to
+ // decide to make this interface work on all platforms, rework the interface to make it more
+    // generic, or decide that the MacroAssembler cannot practically be used to abstract these
+ // operations, and make clients go directly to the m_assembler to plant truncation instructions.
+ // In short, FIXME:.
+ bool supportsFloatingPointTruncate() const { return false; }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(Imm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vldr(dest, base, offset);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(Imm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vstr(src, base, offset);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vadd_F64(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ addDouble(fpTempRegister, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsub_F64(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ subDouble(fpTempRegister, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmul_F64(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ mulDouble(fpTempRegister, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmov(fpTempRegister, src);
+ m_assembler.vcvt_F64_S32(dest, fpTempRegister);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.vcmp_F64(left, right);
+ m_assembler.vmrs_APSR_nzcv_FPSCR();
+ return makeBranch(cond);
+ }
+
+ Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
+ {
+        ASSERT_NOT_REACHED();
+        return Jump();
+    }
+
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
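+    // For example (illustrative register name):
+    //     push(regT0);    // str, preindexed with writeback: sp -= 4
+    //     // ... code that clobbers regT0 ...
+    //     pop(regT0);     // ldr, postindexed with writeback: sp += 4
+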
+ void pop(RegisterID dest)
+ {
+        // load postindexed with writeback
+ m_assembler.ldr(dest, ARM::sp, sizeof(void*), false, true);
+ }
+
+ void push(RegisterID src)
+ {
+ // store preindexed with writeback
+ m_assembler.str(src, ARM::sp, -sizeof(void*), true, true);
+ }
+
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void push(Imm32 imm)
+ {
+ move(imm, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ uint32_t value = imm.m_value;
+
+ if (imm.m_isPointer)
+ moveFixedWidthEncoding(imm, dest);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+
+ if (armImm.isValid())
+ m_assembler.mov(dest, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+ m_assembler.mvn(dest, armImm);
+ else {
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+ if (value & 0xffff0000)
+ m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
+ }
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mov(dest, src);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, dataTempRegister);
+ move(reg2, reg1);
+ move(dataTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+
+ // Forwards / external control flow operations:
+ //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+    // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
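+    // For example (illustrative operands):
+    //     Jump slowCase = branch32(LessThanOrEqual, regT0, Imm32(5));
+    //     // ... fast path ...
+    //     slowCase.link(this);  // or hand the Jump to a PatchBuffer for external linkage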
+private:
+
+ // Should we be using TEQ for equal/not-equal?
+ void compare32(RegisterID left, Imm32 right)
+ {
+ int32_t imm = right.m_value;
+ if (!imm)
+ m_assembler.tst(left, left);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.cmp(left, armImm);
+            else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+ m_assembler.cmn(left, armImm);
+ else {
+ move(Imm32(imm), dataTempRegister);
+ m_assembler.cmp(left, dataTempRegister);
+ }
+ }
+ }
+
+ void test32(RegisterID reg, Imm32 mask)
+ {
+ int32_t imm = mask.m_value;
+
+ if (imm == -1)
+ m_assembler.tst(reg, reg);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.tst(reg, armImm);
+ else {
+ move(mask, dataTempRegister);
+ m_assembler.tst(reg, dataTempRegister);
+ }
+ }
+ }
+
+public:
+ Jump branch32(Condition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Imm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(Condition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, Address left, Imm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left.m_ptr, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ {
+ load16(left, dataTempRegister);
+ m_assembler.lsl(addressTempRegister, right, 16);
+ m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
+ return branch32(cond, dataTempRegister, addressTempRegister);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load16(left, addressTempRegister);
+ m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
+ return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.tst(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ test32(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump jump()
+ {
+ return Jump(makeJump());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.bx(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ load32(address, dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
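+    // For example (illustrative operands), an overflow-checked addition:
+    //     Jump overflowed = branchAdd32(Overflow, regT1, regT0);  // regT0 += regT1, branch on overflow
+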
+ Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ m_assembler.add_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add_S(dest, dest, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT(cond == Overflow);
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ m_assembler.asr(addressTempRegister, dest, 31);
+ return branch32(NotEqual, addressTempRegister, dataTempRegister);
+ }
+
+ Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT(cond == Overflow);
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ m_assembler.asr(addressTempRegister, dest, 31);
+ return branch32(NotEqual, addressTempRegister, dataTempRegister);
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ m_assembler.sub_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub_S(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub_S(dest, dest, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.bkpt();
+ }
+
+ Call nearCall()
+ {
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+ }
+
+ Call call()
+ {
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.blx(target), Call::None);
+ }
+
+ Call call(Address address)
+ {
+ load32(address, dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::None);
+ }
+
+ void ret()
+ {
+ m_assembler.bx(linkRegister);
+ }
+
+ void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ compare32(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+ void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ load32(address, dataTempRegister);
+ test32(dataTempRegister, mask);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+
+ DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
+ {
+ moveFixedWidthEncoding(imm, dst);
+ return DataLabel32(this);
+ }
+
+ DataLabelPtr moveWithPatch(ImmPtr imm, RegisterID dst)
+ {
+ moveFixedWidthEncoding(Imm32(imm), dst);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ load32(left, addressTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, addressTempRegister, dataTempRegister);
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ return label;
+ }
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(ImmPtr(0), address); }
+
+
+ Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't link.
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+
+protected:
+ ARMv7Assembler::JmpSrc makeJump()
+ {
+ return m_assembler.b();
+ }
+
+ ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
+ {
+ m_assembler.it(cond);
+ return m_assembler.b();
+ }
+ ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
+ ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
+
+ ArmAddress setupArmAddress(BaseIndex address)
+ {
+ if (address.offset) {
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(Imm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return ArmAddress(addressTempRegister, address.index, address.scale);
+ } else
+ return ArmAddress(address.base, address.index, address.scale);
+ }
+
+ ArmAddress setupArmAddress(Address address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(Imm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ ArmAddress setupArmAddress(ImplicitAddress address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(Imm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ RegisterID makeBaseIndexBase(BaseIndex address)
+ {
+ if (!address.offset)
+ return address.base;
+
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(Imm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return addressTempRegister;
+ }
+
+    void moveFixedWidthEncoding(Imm32 imm, RegisterID dst)
+ {
+ uint32_t value = imm.m_value;
+ m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
+ m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
+
+ ARMv7Assembler::Condition armV7Condition(Condition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARMv7_h
diff --git a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
new file mode 100644
index 0000000..0aa985c
--- /dev/null
+++ b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerCodeRef_h
+#define MacroAssemblerCodeRef_h
+
+#include <wtf/Platform.h>
+
+#include "ExecutableAllocator.h"
+#include "PassRefPtr.h"
+#include "RefPtr.h"
+#include "UnusedParam.h"
+
+#if ENABLE(ASSEMBLER)
+
+// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
+// instruction address on the platform (for example, check any alignment requirements).
+#if PLATFORM(ARM_V7)
+// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
+// into the processor are decorated with the bottom bit set, indicating that this is
+// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
+// decorated and undecorated null, and the second test ensures that the pointer is
+// decorated.
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
+#else
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(ptr)
+#endif
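+// For example (illustrative): on ARMv7 a 16-bit aligned code address 'raw' would be
+// decorated before use, e.g. void* callable = reinterpret_cast<char*>(raw) + 1;
+// ASSERT_VALID_CODE_POINTER(callable) then passes both tests. MacroAssemblerCodePtr
+// below applies this decoration internally.
+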
+
+namespace JSC {
+
+// FunctionPtr:
+//
+// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
+// (particularly, the stub functions).
+class FunctionPtr {
+public:
+ FunctionPtr()
+ : m_value(0)
+ {
+ }
+
+ template<typename FunctionType>
+ explicit FunctionPtr(FunctionType* value)
+ : m_value(reinterpret_cast<void*>(value))
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+ void* executableAddress() const { return m_value; }
+
+
+private:
+ void* m_value;
+};
+
+// ReturnAddressPtr:
+//
+// ReturnAddressPtr should be used to wrap return addresses generated by processor
+// 'call' instructions executed in JIT code. We use return addresses to look up
+// exception and optimization information, and to repatch the call instruction
+// that is the source of the return address.
+class ReturnAddressPtr {
+public:
+ ReturnAddressPtr()
+ : m_value(0)
+ {
+ }
+
+ explicit ReturnAddressPtr(void* value)
+ : m_value(value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodePtr:
+//
+// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
+class MacroAssemblerCodePtr {
+public:
+ MacroAssemblerCodePtr()
+ : m_value(0)
+ {
+ }
+
+ explicit MacroAssemblerCodePtr(void* value)
+#if PLATFORM(ARM_V7)
+ // Decorate the pointer as a thumb code pointer.
+ : m_value(reinterpret_cast<char*>(value) + 1)
+#else
+ : m_value(value)
+#endif
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
+ : m_value(ra.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* executableAddress() const { return m_value; }
+#if PLATFORM(ARM_V7)
+ // To use this pointer as a data address remove the decoration.
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
+#else
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
+#endif
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodeRef:
+//
+// A reference to a section of JIT generated code. A CodeRef consists of a
+// pointer to the code, and a ref pointer to the pool from within which it
+// was allocated.
+class MacroAssemblerCodeRef {
+public:
+ MacroAssemblerCodeRef()
+#ifndef NDEBUG
+ : m_size(0)
+#endif
+ {
+ }
+
+ MacroAssemblerCodeRef(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
+ : m_code(code)
+ , m_executablePool(executablePool)
+ {
+#ifndef NDEBUG
+ m_size = size;
+#else
+ UNUSED_PARAM(size);
+#endif
+ }
+
+ MacroAssemblerCodePtr m_code;
+ RefPtr<ExecutablePool> m_executablePool;
+#ifndef NDEBUG
+ size_t m_size;
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerCodeRef_h
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86.h b/JavaScriptCore/assembler/MacroAssemblerX86.h
index b85b8b2..aaf98fd 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -36,10 +36,17 @@ namespace JSC {
class MacroAssemblerX86 : public MacroAssemblerX86Common {
public:
+ MacroAssemblerX86()
+ : m_isSSE2Present(isSSE2Present())
+ {
+ }
+
static const Scale ScalePtr = TimesFour;
using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::or32;
using MacroAssemblerX86Common::load32;
using MacroAssemblerX86Common::store32;
using MacroAssemblerX86Common::branch32;
@@ -55,6 +62,21 @@ public:
m_assembler.addl_im(imm.m_value, address.m_ptr);
}
+ void addWithCarry32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.adcl_im(imm.m_value, address.m_ptr);
+ }
+
+ void and32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.andl_im(imm.m_value, address.m_ptr);
+ }
+
+ void or32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.orl_im(imm.m_value, address.m_ptr);
+ }
+
void sub32(Imm32 imm, AbsoluteAddress address)
{
m_assembler.subl_im(imm.m_value, address.m_ptr);
@@ -70,16 +92,21 @@ public:
m_assembler.movl_i32m(imm.m_value, address);
}
+ void store32(RegisterID src, void* address)
+ {
+ m_assembler.movl_rm(src, address);
+ }
+
Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
{
m_assembler.cmpl_rm(right, left.m_ptr);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
{
m_assembler.cmpl_im(right.m_value, left.m_ptr);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Call call()
@@ -98,25 +125,45 @@ public:
}
+ DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+ {
+ m_assembler.movl_i32r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
{
m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
dataLabel = DataLabelPtr(this);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
{
m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
dataLabel = DataLabelPtr(this);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
- DataLabelPtr storePtrWithPatch(Address address)
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
{
- m_assembler.movl_i32m(0, address.offset, address.base);
+ m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
return DataLabelPtr(this);
}
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ load32(address, dest);
+ return label;
+ }
+
+ bool supportsFloatingPoint() const { return m_isSSE2Present; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ bool supportsFloatingPointTruncate() const { return m_isSSE2Present; }
+
+private:
+ const bool m_isSSE2Present;
};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index 5fcd25d..cea691e 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -38,20 +38,30 @@ namespace JSC {
class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
public:
- typedef X86Assembler::Condition Condition;
- static const Condition Equal = X86Assembler::ConditionE;
- static const Condition NotEqual = X86Assembler::ConditionNE;
- static const Condition Above = X86Assembler::ConditionA;
- static const Condition AboveOrEqual = X86Assembler::ConditionAE;
- static const Condition Below = X86Assembler::ConditionB;
- static const Condition BelowOrEqual = X86Assembler::ConditionBE;
- static const Condition GreaterThan = X86Assembler::ConditionG;
- static const Condition GreaterThanOrEqual = X86Assembler::ConditionGE;
- static const Condition LessThan = X86Assembler::ConditionL;
- static const Condition LessThanOrEqual = X86Assembler::ConditionLE;
- static const Condition Overflow = X86Assembler::ConditionO;
- static const Condition Zero = X86Assembler::ConditionE;
- static const Condition NonZero = X86Assembler::ConditionNE;
+ enum Condition {
+ Equal = X86Assembler::ConditionE,
+ NotEqual = X86Assembler::ConditionNE,
+ Above = X86Assembler::ConditionA,
+ AboveOrEqual = X86Assembler::ConditionAE,
+ Below = X86Assembler::ConditionB,
+ BelowOrEqual = X86Assembler::ConditionBE,
+ GreaterThan = X86Assembler::ConditionG,
+ GreaterThanOrEqual = X86Assembler::ConditionGE,
+ LessThan = X86Assembler::ConditionL,
+ LessThanOrEqual = X86Assembler::ConditionLE,
+ Overflow = X86Assembler::ConditionO,
+ Signed = X86Assembler::ConditionS,
+ Zero = X86Assembler::ConditionE,
+ NonZero = X86Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ DoubleEqual = X86Assembler::ConditionE,
+ DoubleGreaterThan = X86Assembler::ConditionA,
+ DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
+ DoubleLessThan = X86Assembler::ConditionB,
+ DoubleLessThanOrEqual = X86Assembler::ConditionBE,
+ };
static const RegisterID stackPointerRegister = X86::esp;
@@ -92,6 +102,11 @@ public:
m_assembler.andl_ir(imm.m_value, dest);
}
+ void and32(Imm32 imm, Address address)
+ {
+ m_assembler.andl_im(imm.m_value, address.offset, address.base);
+ }
+
void lshift32(Imm32 imm, RegisterID dest)
{
m_assembler.shll_i8r(imm.m_value, dest);
@@ -144,6 +159,11 @@ public:
m_assembler.orl_ir(imm.m_value, dest);
}
+ void or32(Imm32 imm, Address address)
+ {
+ m_assembler.orl_im(imm.m_value, address.offset, address.base);
+ }
+
void rshift32(RegisterID shift_amount, RegisterID dest)
{
// On x86 we can only shift by ecx; if asked to shift by another register we'll
@@ -250,7 +270,84 @@ public:
{
m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
}
-
+
+
+ // Floating-point operations:
+ //
+ // Presently only supports SSE, not x87 floating point.
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, dest);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_rr(src, dest);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_mr(src.offset, src.base, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_rr(src, dest);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_mr(src.offset, src.base, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_rr(src, dest);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_mr(src.offset, src.base, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsi2sd_rr(src, dest);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.ucomisd_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ // Truncates 'src' to an integer, and places the resulting value in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
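+ // cvttsd2si writes 0x80000000 (the x86 "integer indefinite" value) when the
+ // conversion overflows or the source is NaN, so comparing against that
+ // constant detects failure (and, as noted above, a genuine INT_MIN result).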
+ return branch32(Equal, dest, Imm32(0x80000000));
+ }
+
// Stack manipulation operations:
//
@@ -280,6 +377,7 @@ public:
m_assembler.push_i32(imm.m_value);
}
+
// Register move operations:
//
// Move values in registers.
@@ -327,7 +425,8 @@ public:
#else
void move(RegisterID src, RegisterID dest)
{
- m_assembler.movl_rr(src, dest);
+ if (src != dest)
+ m_assembler.movl_rr(src, dest);
}
void move(ImmPtr imm, RegisterID dest)
@@ -337,19 +436,18 @@ public:
void swap(RegisterID reg1, RegisterID reg2)
{
- m_assembler.xchgl_rr(reg1, reg2);
+ if (reg1 != reg2)
+ m_assembler.xchgl_rr(reg1, reg2);
}
void signExtend32ToPtr(RegisterID src, RegisterID dest)
{
- if (src != dest)
- move(src, dest);
+ move(src, dest);
}
void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
{
- if (src != dest)
- move(src, dest);
+ move(src, dest);
}
#endif
@@ -376,7 +474,7 @@ public:
Jump branch32(Condition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpl_rr(right, left);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branch32(Condition cond, RegisterID left, Imm32 right)
@@ -385,38 +483,52 @@ public:
m_assembler.testl_rr(left, left);
else
m_assembler.cmpl_ir(right.m_value, left);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branch32(Condition cond, RegisterID left, Address right)
{
m_assembler.cmpl_mr(right.offset, right.base, left);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branch32(Condition cond, Address left, RegisterID right)
{
m_assembler.cmpl_rm(right, left.offset, left.base);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branch32(Condition cond, Address left, Imm32 right)
{
m_assembler.cmpl_im(right.m_value, left.offset, left.base);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branch16(Condition cond, BaseIndex left, RegisterID right)
{
m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFF0000));
+
+ m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
ASSERT((cond == Zero) || (cond == NonZero));
m_assembler.testl_rr(reg, mask);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
@@ -429,7 +541,7 @@ public:
m_assembler.testb_i8r(mask.m_value, reg);
else
m_assembler.testl_i32r(mask.m_value, reg);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
@@ -439,7 +551,7 @@ public:
m_assembler.cmpl_im(0, address.offset, address.base);
else
m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
@@ -449,7 +561,7 @@ public:
m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
else
m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump jump()
@@ -481,44 +593,44 @@ public:
Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
-
+
Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
add32(imm, dest);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ ASSERT(cond == Overflow);
mul32(src, dest);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ ASSERT(cond == Overflow);
mul32(imm, src, dest);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
sub32(src, dest);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
sub32(imm, dest);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
@@ -539,6 +651,11 @@ public:
return Call(m_assembler.call(target), Call::None);
}
+ void call(Address address)
+ {
+ m_assembler.call_m(address.offset, address.base);
+ }
+
void ret()
{
m_assembler.ret();
@@ -547,7 +664,7 @@ public:
void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
{
m_assembler.cmpl_rr(right, left);
- m_assembler.setCC_r(cond, dest);
+ m_assembler.setCC_r(x86Condition(cond), dest);
m_assembler.movzbl_rr(dest, dest);
}
@@ -557,7 +674,7 @@ public:
m_assembler.testl_rr(left, left);
else
m_assembler.cmpl_ir(right.m_value, left);
- m_assembler.setCC_r(cond, dest);
+ m_assembler.setCC_r(x86Condition(cond), dest);
m_assembler.movzbl_rr(dest, dest);
}
@@ -571,9 +688,89 @@ public:
m_assembler.cmpl_im(0, address.offset, address.base);
else
m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
- m_assembler.setCC_r(cond, dest);
+ m_assembler.setCC_r(x86Condition(cond), dest);
m_assembler.movzbl_rr(dest, dest);
}
+
+protected:
+ X86Assembler::Condition x86Condition(Condition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+ X86Assembler::Condition x86Condition(DoubleCondition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+private:
+ // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
+ // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
+ friend class MacroAssemblerX86;
+
+#if PLATFORM(X86)
+#if PLATFORM(MAC)
+
+ // All X86 Macs are guaranteed to support at least SSE2.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#else // PLATFORM(MAC)
+
+ enum SSE2CheckState {
+ NotCheckedSSE2,
+ HasSSE2,
+ NoSSE2
+ };
+
+ static bool isSSE2Present()
+ {
+ if (s_sse2CheckState == NotCheckedSSE2) {
+ // Default the flags value to zero; if the compiler is
+ // not MSVC or GCC we will read this as SSE2 not present.
+ int flags = 0;
+#if COMPILER(MSVC)
+ _asm {
+ mov eax, 1 // cpuid function 1 gives us the standard feature set
+ cpuid;
+ mov flags, edx;
+ }
+#elif COMPILER(GCC)
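+ // ebx is saved and restored by hand because it may hold the PIC base
+ // pointer, so it must not appear in the clobber list.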
+ asm (
+ "movl $0x1, %%eax;"
+ "pushl %%ebx;"
+ "cpuid;"
+ "popl %%ebx;"
+ "movl %%edx, %0;"
+ : "=g" (flags)
+ :
+ : "%eax", "%ecx", "%edx"
+ );
+#endif
+ static const int SSE2FeatureBit = 1 << 26;
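+ // CPUID function 1 reports SSE2 support in bit 26 of edx.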
+ s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
+ }
+ // Only check once.
+ ASSERT(s_sse2CheckState != NotCheckedSSE2);
+
+ return s_sse2CheckState == HasSSE2;
+ }
+
+ static SSE2CheckState s_sse2CheckState;
+
+#endif // PLATFORM(MAC)
+#elif !defined(NDEBUG) // PLATFORM(X86)
+
+ // On x86-64 we should never be checking for SSE2 in a non-debug build,
+ // but in debug builds we provide this method to keep the asserts above happy.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#endif
};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 971787b..ffdca7c 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -42,6 +42,8 @@ public:
static const Scale ScalePtr = TimesEight;
using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::or32;
using MacroAssemblerX86Common::sub32;
using MacroAssemblerX86Common::load32;
using MacroAssemblerX86Common::store32;
@@ -53,6 +55,18 @@ public:
add32(imm, Address(scratchRegister));
}
+ void and32(Imm32 imm, AbsoluteAddress address)
+ {
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ and32(imm, Address(scratchRegister));
+ }
+
+ void or32(Imm32 imm, AbsoluteAddress address)
+ {
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ or32(imm, Address(scratchRegister));
+ }
+
void sub32(Imm32 imm, AbsoluteAddress address)
{
move(ImmPtr(address.m_ptr), scratchRegister);
@@ -122,9 +136,20 @@ public:
void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
{
- m_assembler.leal_mr(imm.m_value, src, dest);
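+ // Use the 64-bit form of lea so the computed pointer is not truncated to 32 bits.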
+ m_assembler.leaq_mr(imm.m_value, src, dest);
}
+ void addPtr(Imm32 imm, Address address)
+ {
+ m_assembler.addq_im(imm.m_value, address.offset, address.base);
+ }
+
+ void addPtr(Imm32 imm, AbsoluteAddress address)
+ {
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ addPtr(imm, Address(scratchRegister));
+ }
+
void andPtr(RegisterID src, RegisterID dest)
{
m_assembler.andq_rr(src, dest);
@@ -241,6 +266,17 @@ public:
{
m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
}
+
+ void storePtr(RegisterID src, void* address)
+ {
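+ // Only eax/rax can be stored to a full 64-bit absolute address (the moffs
+ // form of mov), so route other registers through eax with a pair of swaps.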
+ if (src == X86::eax)
+ m_assembler.movq_EAXm(address);
+ else {
+ swap(X86::eax, src);
+ m_assembler.movq_EAXm(address);
+ swap(X86::eax, src);
+ }
+ }
void storePtr(ImmPtr imm, ImplicitAddress address)
{
@@ -259,20 +295,30 @@ public:
return DataLabel32(this);
}
+ void movePtrToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
{
if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
m_assembler.testq_rr(left, left);
else
m_assembler.cmpq_ir(right.m_value, left);
- m_assembler.setCC_r(cond, dest);
+ m_assembler.setCC_r(x86Condition(cond), dest);
m_assembler.movzbl_rr(dest, dest);
}
Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpq_rr(right, left);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
@@ -283,7 +329,7 @@ public:
m_assembler.testq_rr(left, left);
else
m_assembler.cmpq_ir(imm, left);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
} else {
move(right, scratchRegister);
return branchPtr(cond, left, scratchRegister);
@@ -293,7 +339,7 @@ public:
Jump branchPtr(Condition cond, RegisterID left, Address right)
{
m_assembler.cmpq_mr(right.offset, right.base, left);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
@@ -305,7 +351,7 @@ public:
Jump branchPtr(Condition cond, Address left, RegisterID right)
{
m_assembler.cmpq_rm(right, left.offset, left.base);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtr(Condition cond, Address left, ImmPtr right)
@@ -317,7 +363,7 @@ public:
Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
{
m_assembler.testq_rr(reg, mask);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
@@ -329,7 +375,7 @@ public:
m_assembler.testb_i8r(mask.m_value, reg);
else
m_assembler.testq_i32r(mask.m_value, reg);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
@@ -338,7 +384,7 @@ public:
m_assembler.cmpq_im(0, address.offset, address.base);
else
m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
@@ -347,7 +393,7 @@ public:
m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
else
m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
@@ -355,14 +401,14 @@ public:
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
addPtr(src, dest);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
subPtr(imm, dest);
- return Jump(m_assembler.jCC(cond));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
}
DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
@@ -383,12 +429,23 @@ public:
return branchPtr(cond, left, scratchRegister);
}
- DataLabelPtr storePtrWithPatch(Address address)
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
{
- DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
+ DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
storePtr(scratchRegister, address);
return label;
}
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ loadPtr(address, dest);
+ return label;
+ }
+
+ bool supportsFloatingPoint() const { return true; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ bool supportsFloatingPointTruncate() const { return true; }
};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h
index bcafda1..7a8b58d 100644
--- a/JavaScriptCore/assembler/X86Assembler.h
+++ b/JavaScriptCore/assembler/X86Assembler.h
@@ -41,8 +41,6 @@ inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(sign
#if PLATFORM(X86_64)
inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
-
-#define REPTACH_OFFSET_CALL_R11 3
#endif
namespace X86 {
@@ -84,6 +82,7 @@ class X86Assembler {
public:
typedef X86::RegisterID RegisterID;
typedef X86::XMMRegisterID XMMRegisterID;
+ typedef XMMRegisterID FPRegisterID;
typedef enum {
ConditionO,
@@ -193,6 +192,7 @@ private:
typedef enum {
GROUP1_OP_ADD = 0,
GROUP1_OP_OR = 1,
+ GROUP1_OP_ADC = 2,
GROUP1_OP_AND = 4,
GROUP1_OP_SUB = 5,
GROUP1_OP_XOR = 6,
@@ -295,6 +295,19 @@ public:
// Arithmetic operations:
+#if !PLATFORM(X86_64)
+ void adcl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
void addl_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
@@ -343,6 +356,17 @@ public:
m_formatter.immediate32(imm);
}
}
+
+ void addq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
#else
void addl_im(int imm, void* addr)
{
@@ -372,6 +396,17 @@ public:
}
}
+ void andl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
#if PLATFORM(X86_64)
void andq_rr(RegisterID src, RegisterID dst)
{
@@ -388,6 +423,17 @@ public:
m_formatter.immediate32(imm);
}
}
+#else
+ void andl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
#endif
void notl_r(RegisterID dst)
@@ -416,6 +462,17 @@ public:
}
}
+ void orl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
#if PLATFORM(X86_64)
void orq_rr(RegisterID src, RegisterID dst)
{
@@ -432,6 +489,17 @@ public:
m_formatter.immediate32(imm);
}
}
+#else
+ void orl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
#endif
void subl_rr(RegisterID src, RegisterID dst)
@@ -726,6 +794,19 @@ public:
m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
}
+ void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
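+ // The operand-size prefix makes this a 16-bit compare, so the 'Iz' immediate
+ // shrinks to 16 bits - hence immediate16 in the non-sign-extendable case.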
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate16(imm);
+ }
+ }
+
void testl_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
@@ -774,6 +855,12 @@ public:
}
#endif
+ void testw_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
void testb_i8r(int imm, RegisterID dst)
{
m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
@@ -918,6 +1005,12 @@ public:
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
}
+ void movq_EAXm(void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
void movq_mr(int offset, RegisterID base, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
@@ -952,6 +1045,14 @@ public:
#else
+ void movl_rm(RegisterID src, void* addr)
+ {
+ if (src == X86::eax)
+ movl_EAXm(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
+ }
+
void movl_mr(void* addr, RegisterID dst)
{
if (dst == X86::eax)
@@ -989,6 +1090,12 @@ public:
{
m_formatter.oneByteOp(OP_LEA, dst, base, offset);
}
+#if PLATFORM(X86_64)
+ void leaq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
+ }
+#endif
// Flow control:
@@ -1003,6 +1110,11 @@ public:
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
return JmpSrc(m_formatter.size());
}
+
+ void call_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
+ }
JmpSrc jmp()
{
@@ -1241,74 +1353,83 @@ public:
}
// Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
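+ //
+ // In this class the link*/patch* methods assume the buffer is still writable,
+ // while the relink*/repatch* methods first re-enable write access via
+ // ExecutableAllocator::MakeWritable.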
void linkJump(JmpSrc from, JmpDst to)
{
- ASSERT(to.m_offset != -1);
ASSERT(from.m_offset != -1);
-
- reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
+ ASSERT(to.m_offset != -1);
+
+ char* code = reinterpret_cast<char*>(m_formatter.data());
+ patchRel32(code + from.m_offset, code + to.m_offset);
}
static void linkJump(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
- ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
- ASSERT(linkOffset == static_cast<int>(linkOffset));
- reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset;
+
+ patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
-
- static void patchJump(intptr_t where, void* destination)
+
+ static void linkCall(void* code, JmpSrc from, void* to)
{
- intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
- ASSERT(offset == static_cast<int32_t>(offset));
- reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
+ ASSERT(from.m_offset != -1);
+
+ patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
-
+
#if PLATFORM(X86_64)
- // FIXME: transition these functions out of here - the assembler
- // shouldn't know that that this is mov/call pair using r11. :-/
- static void patchMacroAssemblerCall(intptr_t where, void* destination)
+ static void patchPointerForCall(void* where, void* value)
{
- patchAddress(reinterpret_cast<void*>(where - REPTACH_OFFSET_CALL_R11), JmpDst(0), destination);
+ reinterpret_cast<void**>(where)[-1] = value;
}
-#else
- static void patchMacroAssemblerCall(intptr_t where, void* destination)
+#endif
+
+ static void patchPointer(void* code, JmpDst where, void* value)
{
- intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
- ASSERT(offset == static_cast<int32_t>(offset));
- reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
+ ASSERT(where.m_offset != -1);
+
+ patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
}
-#endif
- void linkCall(JmpSrc from, JmpDst to)
+ static void relinkJump(void* from, void* to)
{
- ASSERT(to.m_offset != -1);
- ASSERT(from.m_offset != -1);
-
- reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
+ patchRel32(from, to);
}
- static void linkCall(void* code, JmpSrc from, void* to)
+ static void relinkCall(void* from, void* to)
{
- ASSERT(from.m_offset != -1);
- ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
- ASSERT(linkOffset == static_cast<int>(linkOffset));
- reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset;
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
+ patchRel32(from, to);
}
- static void patchCall(intptr_t where, void* destination)
+ static void repatchInt32(void* where, int32_t value)
{
- intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
- ASSERT(offset == static_cast<int32_t>(offset));
- reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(int32_t), sizeof(int32_t));
+ patchInt32(where, value);
}
- static void patchAddress(void* code, JmpDst position, void* value)
+ static void repatchPointer(void* where, void* value)
{
- ASSERT(position.m_offset != -1);
-
- reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value;
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(void*), sizeof(void*));
+ patchPointer(where, value);
+ }
+
+ static void repatchLoadPtrToLEA(void* where)
+ {
+#if PLATFORM(X86_64)
+ // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
+ // Skip over the prefix byte.
+ where = reinterpret_cast<char*>(where) + 1;
+#endif
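+ // mov (register <- memory) and lea share the same ModRM/SIB/displacement
+ // layout, so rewriting just the opcode byte turns the load into an lea that
+ // computes the address instead of dereferencing it.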
+ ExecutableAllocator::MakeWritable unprotect(where, 1);
+ *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
}
static unsigned getCallReturnOffset(JmpSrc call)
@@ -1319,6 +1440,8 @@ public:
static void* getRelocatedAddress(void* code, JmpSrc jump)
{
+ ASSERT(jump.m_offset != -1);
+
return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
}
@@ -1344,16 +1467,6 @@ public:
return dst.m_offset - src.m_offset;
}
- static void patchImmediate(intptr_t where, int32_t value)
- {
- reinterpret_cast<int32_t*>(where)[-1] = value;
- }
-
- static void patchPointer(intptr_t where, intptr_t value)
- {
- reinterpret_cast<intptr_t*>(where)[-1] = value;
- }
-
void* executableCopy(ExecutablePool* allocator)
{
void* copy = m_formatter.executableCopy(allocator);
@@ -1363,6 +1476,24 @@ public:
private:
+ static void patchPointer(void* where, void* value)
+ {
+ reinterpret_cast<void**>(where)[-1] = value;
+ }
+
+ static void patchInt32(void* where, int32_t value)
+ {
+ reinterpret_cast<int32_t*>(where)[-1] = value;
+ }
+
+ static void patchRel32(void* from, void* to)
+ {
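+ // 'from' points just past the rel32 field, so the displacement written by
+ // patchInt32 (at from[-1]) is relative to the end of the jump/call, as the
+ // x86 rel32 encoding requires.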
+ intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ ASSERT(offset == static_cast<int32_t>(offset));
+
+ patchInt32(from, offset);
+ }
+
class X86InstructionFormatter {
static const int maxInstructionSize = 16;
@@ -1604,6 +1735,11 @@ private:
m_buffer.putByteUnchecked(imm);
}
+ void immediate16(int imm)
+ {
+ m_buffer.putShortUnchecked(imm);
+ }
+
void immediate32(int imm)
{
m_buffer.putIntUnchecked(imm);