Diffstat (limited to 'JavaScriptCore/assembler')
-rw-r--r--  JavaScriptCore/assembler/ARMAssembler.cpp                     17
-rw-r--r--  JavaScriptCore/assembler/ARMAssembler.h                       59
-rw-r--r--  JavaScriptCore/assembler/ARMv7Assembler.h                     56
-rw-r--r--  JavaScriptCore/assembler/AbstractMacroAssembler.h             24
-rw-r--r--  JavaScriptCore/assembler/AssemblerBuffer.h                     2
-rw-r--r--  JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h     2
-rw-r--r--  JavaScriptCore/assembler/CodeLocation.h                        1
-rw-r--r--  JavaScriptCore/assembler/LinkBuffer.h                          2
-rw-r--r--  JavaScriptCore/assembler/MIPSAssembler.h                     944
-rw-r--r--  JavaScriptCore/assembler/MacroAssembler.h                     13
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerARM.h                 113
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerARMv7.h                88
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerCodeRef.h               2
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerMIPS.h               1711
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86.h                   5
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86Common.h            76
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerX86_64.h               13
-rw-r--r--  JavaScriptCore/assembler/RepatchBuffer.h                       2
-rw-r--r--  JavaScriptCore/assembler/X86Assembler.h                       58
19 files changed, 3115 insertions, 73 deletions
diff --git a/JavaScriptCore/assembler/ARMAssembler.cpp b/JavaScriptCore/assembler/ARMAssembler.cpp
index 6dd2b87..a181b7e 100644
--- a/JavaScriptCore/assembler/ARMAssembler.cpp
+++ b/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -262,28 +262,29 @@ ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
// Memory load/store helpers
-void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
+void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes)
{
+ ARMWord transferFlag = bytes ? DT_BYTE : 0;
if (offset >= 0) {
if (offset <= 0xfff)
- dtr_u(isLoad, srcDst, base, offset);
+ dtr_u(isLoad, srcDst, base, offset | transferFlag);
else if (offset <= 0xfffff) {
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
- dtr_u(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
+ dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
} else {
ARMWord reg = getImm(offset, ARMRegisters::S0);
- dtr_ur(isLoad, srcDst, base, reg);
+ dtr_ur(isLoad, srcDst, base, reg | transferFlag);
}
} else {
offset = -offset;
if (offset <= 0xfff)
- dtr_d(isLoad, srcDst, base, offset);
+ dtr_d(isLoad, srcDst, base, offset | transferFlag);
else if (offset <= 0xfffff) {
sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
- dtr_d(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
+ dtr_d(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
} else {
ARMWord reg = getImm(offset, ARMRegisters::S0);
- dtr_dr(isLoad, srcDst, base, reg);
+ dtr_dr(isLoad, srcDst, base, reg | transferFlag);
}
}
}
@@ -356,7 +357,7 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator)
int pos = (*iter) & (~0x1);
ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
ARMWord* addr = getLdrImmAddress(ldrAddr);
- if (*addr != 0xffffffff) {
+ if (*addr != InvalidBranchTarget) {
if (!(*iter & 1)) {
int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
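
The new bytes flag simply ORs DT_BYTE (bit 22) into the offset word handed to the dtr_* emitters, which turns the LDR/STR forms into LDRB/STRB without duplicating the offset-splitting logic. As an illustrative sketch (the 0x12345 offset and register names are invented for the example; s0 is the ARMRegisters::S0 scratch register), a byte load through a 20-bit offset would come out roughly as:

    // dataTransfer32(true /* load */, dest, base, 0x12345, true /* bytes */)
    add  s0, base, #0x12000      @ offset >> 12, rotated back into position
    ldrb dest, [s0, #0x345]      @ low 12 bits of the offset, with DT_BYTE set
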
diff --git a/JavaScriptCore/assembler/ARMAssembler.h b/JavaScriptCore/assembler/ARMAssembler.h
index 6967b37..2ca0949 100644
--- a/JavaScriptCore/assembler/ARMAssembler.h
+++ b/JavaScriptCore/assembler/ARMAssembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 University of Szeged
+ * Copyright (C) 2009, 2010 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -27,8 +27,6 @@
#ifndef ARMAssembler_h
#define ARMAssembler_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "AssemblerBufferWithConstantPool.h"
@@ -125,6 +123,7 @@ namespace JSC {
FSUBD = 0x0e300b40,
FMULD = 0x0e200b00,
FCMPD = 0x0eb40b40,
+ FSQRTD = 0x0eb10bc0,
DTR = 0x05000000,
LDRH = 0x00100090,
STRH = 0x00000090,
@@ -133,6 +132,9 @@ namespace JSC {
FDTR = 0x0d000b00,
B = 0x0a000000,
BL = 0x0b000000,
+#if WTF_ARM_ARCH_AT_LEAST(5) || defined(__ARM_ARCH_4T__)
+ BX = 0x012fff10,
+#endif
FMSR = 0x0e000a10,
FMRS = 0x0e100a10,
FSITOD = 0x0eb80bc0,
@@ -141,6 +143,7 @@ namespace JSC {
#if WTF_ARM_ARCH_AT_LEAST(5)
CLZ = 0x016f0f10,
BKPT = 0xe120070,
+ BLX = 0x012fff30,
#endif
#if WTF_ARM_ARCH_AT_LEAST(7)
MOVW = 0x03000000,
@@ -155,6 +158,7 @@ namespace JSC {
SET_CC = (1 << 20),
OP2_OFSREG = (1 << 25),
DT_UP = (1 << 23),
+ DT_BYTE = (1 << 22),
DT_WB = (1 << 21),
        // This flag is included in LDR and STR
DT_PRE = (1 << 24),
@@ -183,6 +187,7 @@ namespace JSC {
};
static const ARMWord INVALID_IMM = 0xf0000000;
+ static const ARMWord InvalidBranchTarget = 0xffffffff;
static const int DefaultPrefetching = 2;
class JmpSrc {
@@ -422,6 +427,11 @@ namespace JSC {
emitInst(static_cast<ARMWord>(cc) | FCMPD, dd, 0, dm);
}
+ void fsqrtd_r(int dd, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FSQRTD, dd, 0, dm);
+ }
+
void ldr_imm(int rd, ARMWord imm, Condition cc = AL)
{
m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm, true);
@@ -548,6 +558,30 @@ namespace JSC {
#endif
}
+ void bx(int rm, Condition cc = AL)
+ {
+#if WTF_ARM_ARCH_AT_LEAST(5) || defined(__ARM_ARCH_4T__)
+ emitInst(static_cast<ARMWord>(cc) | BX, 0, 0, RM(rm));
+#else
+ mov_r(ARMRegisters::pc, RM(rm), cc);
+#endif
+ }
+
+ JmpSrc blx(int rm, Condition cc = AL)
+ {
+#if WTF_ARM_ARCH_AT_LEAST(5)
+ int s = m_buffer.uncheckedSize();
+ emitInst(static_cast<ARMWord>(cc) | BLX, 0, 0, RM(rm));
+#else
+ ASSERT(rm != 14);
+ ensureSpace(2 * sizeof(ARMWord), 0);
+ mov_r(ARMRegisters::lr, ARMRegisters::pc, cc);
+ int s = m_buffer.uncheckedSize();
+ bx(rm, cc);
+#endif
+ return JmpSrc(s);
+ }
+
static ARMWord lsl(int reg, ARMWord value)
{
ASSERT(reg <= ARMRegisters::pc);
@@ -620,21 +654,34 @@ namespace JSC {
return label();
}
- JmpSrc jmp(Condition cc = AL, int useConstantPool = 0)
+ JmpSrc loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
{
ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
int s = m_buffer.uncheckedSize();
- ldr_un_imm(ARMRegisters::pc, 0xffffffff, cc);
+ ldr_un_imm(rd, InvalidBranchTarget, cc);
m_jumps.append(s | (useConstantPool & 0x1));
return JmpSrc(s);
}
+ JmpSrc jmp(Condition cc = AL, int useConstantPool = 0)
+ {
+ return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
+ }
+
void* executableCopy(ExecutablePool* allocator);
// Patching helpers
static ARMWord* getLdrImmAddress(ARMWord* insn)
{
+#if WTF_ARM_ARCH_AT_LEAST(5)
+ // Check for call
+ if ((*insn & 0x0f7f0000) != 0x051f0000) {
+ // Must be BLX
+ ASSERT((*insn & 0x012fff30) == 0x012fff30);
+ insn--;
+ }
+#endif
// Must be an ldr ..., [pc +/- imm]
ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
@@ -780,7 +827,7 @@ namespace JSC {
// Memory load/store helpers
- void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset);
+ void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes = false);
void baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
void doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset);
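
On ARMv5 and later the new bx()/blx() helpers emit the real BX/BLX register forms; bx() falls back to a plain "mov pc, rm" before ARMv4T, and blx() synthesizes the link manually before ARMv5. That fallback works because reading pc yields the address of the current instruction plus 8, so the saved lr already points past the branch (sketch, not part of the patch itself):

    mov lr, pc      @ lr = address of the next instruction + 4, i.e. the return address
    bx  rm          @ (or "mov pc, rm" before ARMv4T)
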
diff --git a/JavaScriptCore/assembler/ARMv7Assembler.h b/JavaScriptCore/assembler/ARMv7Assembler.h
index 4e394b2..21279f5 100644
--- a/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +27,6 @@
#ifndef ARMAssembler_h
#define ARMAssembler_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
#include "AssemblerBuffer.h"
@@ -582,11 +581,13 @@ private:
OP_MOV_reg_T1 = 0x4600,
OP_BLX = 0x4700,
OP_BX = 0x4700,
- OP_LDRH_reg_T1 = 0x5A00,
OP_STR_reg_T1 = 0x5000,
OP_LDR_reg_T1 = 0x5800,
+ OP_LDRH_reg_T1 = 0x5A00,
+ OP_LDRB_reg_T1 = 0x5C00,
OP_STR_imm_T1 = 0x6000,
OP_LDR_imm_T1 = 0x6800,
+ OP_LDRB_imm_T1 = 0x7800,
OP_LDRH_imm_T1 = 0x8800,
OP_STR_imm_T2 = 0x9000,
OP_LDR_imm_T2 = 0x9800,
@@ -631,12 +632,15 @@ private:
OP_SUB_imm_T4 = 0xF2A0,
OP_MOVT = 0xF2C0,
OP_NOP_T2a = 0xF3AF,
+ OP_LDRB_imm_T3 = 0xF810,
+ OP_LDRB_reg_T2 = 0xF810,
OP_LDRH_reg_T2 = 0xF830,
OP_LDRH_imm_T3 = 0xF830,
OP_STR_imm_T4 = 0xF840,
OP_STR_reg_T2 = 0xF840,
OP_LDR_imm_T4 = 0xF850,
OP_LDR_reg_T2 = 0xF850,
+ OP_LDRB_imm_T2 = 0xF890,
OP_LDRH_imm_T2 = 0xF8B0,
OP_STR_imm_T3 = 0xF8C0,
OP_LDR_imm_T3 = 0xF8D0,
@@ -1080,6 +1084,52 @@ public:
m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
}
+ void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt5())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+
+ ASSERT(!(offset & ~0xff));
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
+ }
+
+ void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
{
ASSERT(!BadReg(rd));
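
Like the existing ldr/ldrh helpers, the three ldrb() overloads pick the narrowest Thumb-2 encoding that fits: the 16-bit T1 forms when every register is r0-r7 and the immediate fits in 5 bits (or the shift is zero for the register form), otherwise the 32-bit T2/T3 forms. A rough usage sketch (register and offset choices are illustrative):

    masm.ldrb(r0, r1, ARMThumbImmediate::makeUInt12(4));     // 16-bit LDRB (imm, T1)
    masm.ldrb(r0, r1, ARMThumbImmediate::makeUInt12(256));   // 32-bit LDRB (imm, T2)
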
diff --git a/JavaScriptCore/assembler/AbstractMacroAssembler.h b/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 198e8d1..1c7f269 100644
--- a/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -26,8 +26,6 @@
#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h
-#include <wtf/Platform.h>
-
#include <MacroAssemblerCodeRef.h>
#include <CodeLocation.h>
#include <wtf/Noncopyable.h>
@@ -83,6 +81,17 @@ public:
int32_t offset;
};
+ struct ExtendedAddress {
+ explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ intptr_t offset;
+ };
+
// ImplicitAddress:
//
// This class is used for explicit 'load' and 'store' operations
@@ -151,7 +160,7 @@ public:
// in a class requiring explicit construction in order to differentiate
// from pointers used as absolute addresses to memory operations
struct ImmPtr {
- explicit ImmPtr(void* value)
+ explicit ImmPtr(const void* value)
: m_value(value)
{
}
@@ -161,7 +170,7 @@ public:
return reinterpret_cast<intptr_t>(m_value);
}
- void* m_value;
+ const void* m_value;
};
// Imm32:
@@ -173,7 +182,7 @@ public:
struct Imm32 {
explicit Imm32(int32_t value)
: m_value(value)
-#if CPU(ARM)
+#if CPU(ARM) || CPU(MIPS)
, m_isPointer(false)
#endif
{
@@ -182,7 +191,7 @@ public:
#if !CPU(X86_64)
explicit Imm32(ImmPtr ptr)
: m_value(ptr.asIntptr())
-#if CPU(ARM)
+#if CPU(ARM) || CPU(MIPS)
, m_isPointer(true)
#endif
{
@@ -190,13 +199,14 @@ public:
#endif
int32_t m_value;
-#if CPU(ARM)
+#if CPU(ARM) || CPU(MIPS)
// We rely on being able to regenerate code to recover exception handling
// information. Since ARMv7 supports 16-bit immediates there is a danger
// that if pointer values change the layout of the generated code will change.
// To avoid this problem, always generate pointers (and thus Imm32s constructed
// from ImmPtrs) with a code sequence that is able to represent any pointer
// value - don't use a more compact form in these cases.
+ // Same for MIPS.
bool m_isPointer;
#endif
};
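
The m_isPointer flag matters on MIPS for the same reason it does on ARM: repatching a pointer must never change the size of the generated sequence. The MIPS macro assembler therefore materializes pointer immediates with the full two-instruction pair even when a single instruction would do, which is what the lui/ori patching helpers in MIPSAssembler.h assume (sketch of the emitted sequence):

    lui  rt, ptr >> 16
    ori  rt, rt, ptr & 0xffff
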
diff --git a/JavaScriptCore/assembler/AssemblerBuffer.h b/JavaScriptCore/assembler/AssemblerBuffer.h
index 073906a..e2fb8a1 100644
--- a/JavaScriptCore/assembler/AssemblerBuffer.h
+++ b/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -26,8 +26,6 @@
#ifndef AssemblerBuffer_h
#define AssemblerBuffer_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#include "stdint.h"
diff --git a/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
index af3c3be..b1c537e 100644
--- a/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
+++ b/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
@@ -27,8 +27,6 @@
#ifndef AssemblerBufferWithConstantPool_h
#define AssemblerBufferWithConstantPool_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#include "AssemblerBuffer.h"
diff --git a/JavaScriptCore/assembler/CodeLocation.h b/JavaScriptCore/assembler/CodeLocation.h
index b910b6f..cab28cd 100644
--- a/JavaScriptCore/assembler/CodeLocation.h
+++ b/JavaScriptCore/assembler/CodeLocation.h
@@ -26,7 +26,6 @@
#ifndef CodeLocation_h
#define CodeLocation_h
-#include <wtf/Platform.h>
#include <MacroAssemblerCodeRef.h>
diff --git a/JavaScriptCore/assembler/LinkBuffer.h b/JavaScriptCore/assembler/LinkBuffer.h
index 6d08117..47cac5a 100644
--- a/JavaScriptCore/assembler/LinkBuffer.h
+++ b/JavaScriptCore/assembler/LinkBuffer.h
@@ -26,8 +26,6 @@
#ifndef LinkBuffer_h
#define LinkBuffer_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#include <MacroAssembler.h>
diff --git a/JavaScriptCore/assembler/MIPSAssembler.h b/JavaScriptCore/assembler/MIPSAssembler.h
new file mode 100644
index 0000000..ea35268
--- /dev/null
+++ b/JavaScriptCore/assembler/MIPSAssembler.h
@@ -0,0 +1,944 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MIPSAssembler_h
+#define MIPSAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+typedef uint32_t MIPSWord;
+
+namespace MIPSRegisters {
+typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31
+} RegisterID;
+
+typedef enum {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31
+} FPRegisterID;
+
+} // namespace MIPSRegisters
+
+class MIPSAssembler {
+public:
+ typedef MIPSRegisters::RegisterID RegisterID;
+ typedef MIPSRegisters::FPRegisterID FPRegisterID;
+ typedef SegmentedVector<int, 64> Jumps;
+
+ MIPSAssembler()
+ {
+ }
+
+ // MIPS instruction opcode field position
+ enum {
+ OP_SH_RD = 11,
+ OP_SH_RT = 16,
+ OP_SH_RS = 21,
+ OP_SH_SHAMT = 6,
+ OP_SH_CODE = 16,
+ OP_SH_FD = 6,
+ OP_SH_FS = 11,
+ OP_SH_FT = 16
+ };
+
+ class JmpSrc {
+ friend class MIPSAssembler;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ {
+ }
+
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ {
+ }
+
+ int m_offset;
+ };
+
+ class JmpDst {
+ friend class MIPSAssembler;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ , m_used(false)
+ {
+ }
+
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ , m_used(false)
+ {
+ ASSERT(m_offset == offset);
+ }
+
+ int m_offset : 31;
+ int m_used : 1;
+ };
+
+ void emitInst(MIPSWord op)
+ {
+ void* oldBase = m_buffer.data();
+
+ m_buffer.putInt(op);
+
+ void* newBase = m_buffer.data();
+ if (oldBase != newBase)
+ relocateJumps(oldBase, newBase);
+ }
+
+ void nop()
+ {
+ emitInst(0x00000000);
+ }
+
+ /* Need to insert one load data delay nop for mips1. */
+ void loadDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ /* Need to insert one coprocessor access delay nop for mips1. */
+ void copDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ void move(RegisterID rd, RegisterID rs)
+ {
+ /* addu */
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS));
+ }
+
+ /* Set an immediate value to a register. This may generate 1 or 2
+ instructions. */
+ void li(RegisterID dest, int imm)
+ {
+ if (imm >= -32768 && imm <= 32767)
+ addiu(dest, MIPSRegisters::zero, imm);
+ else if (imm >= 0 && imm < 65536)
+ ori(dest, MIPSRegisters::zero, imm);
+ else {
+ lui(dest, imm >> 16);
+ if (imm & 0xffff)
+ ori(dest, dest, imm);
+ }
+ }
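
For illustration (constants invented for the example), li() picks the shortest sequence that can produce the value:

    li(t0, -5);           // addiu t0, zero, -5       (small signed)
    li(t0, 0xabcd);       // ori   t0, zero, 0xabcd   (small unsigned)
    li(t0, 0x12345678);   // lui   t0, 0x1234
                          // ori   t0, t0, 0x5678     (general case)
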
+
+ void lui(RegisterID rt, int imm)
+ {
+ emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void addiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (imm & 0xffff));
+ }
+
+ void addu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+ }
+
+ void subu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+ }
+
+ void mult(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000018 | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mfhi(RegisterID rd)
+ {
+ emitInst(0x00000010 | (rd << OP_SH_RD));
+ }
+
+ void mflo(RegisterID rd)
+ {
+ emitInst(0x00000012 | (rd << OP_SH_RD));
+ }
+
+ void mul(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+#if WTF_MIPS_ISA_AT_LEAST(32)
+ emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+#else
+ mult(rs, rt);
+ mflo(rd);
+#endif
+ }
+
+ void andInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000024 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+ }
+
+ void andi(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (imm & 0xffff));
+ }
+
+ void nor(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+ }
+
+ void orInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+ }
+
+ void ori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (imm & 0xffff));
+ }
+
+ void xorInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+ }
+
+ void xori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (imm & 0xffff));
+ }
+
+ void slt(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+ }
+
+ void sltu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS)
+ | (rt << OP_SH_RT));
+ }
+
+ void sltiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (imm & 0xffff));
+ }
+
+ void sll(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
+ | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void sllv(RegisterID rd, RegisterID rt, int rs)
+ {
+ emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
+ | (rs << OP_SH_RS));
+ }
+
+ void sra(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
+ | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srav(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
+ | (rs << OP_SH_RS));
+ }
+
+ void lbu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwl(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwr(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lhu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void sw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ }
+
+ void jr(RegisterID rs)
+ {
+ emitInst(0x00000008 | (rs << OP_SH_RS));
+ }
+
+ void jalr(RegisterID rs)
+ {
+ emitInst(0x0000f809 | (rs << OP_SH_RS));
+ }
+
+ void jal()
+ {
+ emitInst(0x0c000000);
+ }
+
+ void bkpt()
+ {
+ int value = 512; /* BRK_BUG */
+ emitInst(0x0000000d | ((value & 0x3ff) << OP_SH_CODE));
+ }
+
+ void bgez(RegisterID rs, int imm)
+ {
+ emitInst(0x04010000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void bltz(RegisterID rs, int imm)
+ {
+ emitInst(0x04000000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void beq(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x10000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bne(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x14000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bc1t()
+ {
+ emitInst(0x45010000);
+ }
+
+ void bc1f()
+ {
+ emitInst(0x45000000);
+ }
+
+ JmpSrc newJmpSrc()
+ {
+ return JmpSrc(m_buffer.size());
+ }
+
+ void appendJump()
+ {
+ m_jumps.append(m_buffer.size());
+ }
+
+ void addd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS)
+ | (ft << OP_SH_FT));
+ }
+
+ void subd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS)
+ | (ft << OP_SH_FT));
+ }
+
+ void muld(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS)
+ | (ft << OP_SH_FT));
+ }
+
+ void lwc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ copDelayNop();
+ }
+
+ void ldc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ }
+
+ void swc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ }
+
+ void sdc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS)
+ | (offset & 0xffff));
+ }
+
+ void mtc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44800000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mfc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44000000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void truncwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x4620000d | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtdw(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46800021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void ceqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200032 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cngtd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003f | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cnged(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003d | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003c | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003e | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cueqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200033 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200036 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200034 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void culed(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200037 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cultd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200035 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ // General helpers
+
+ JmpDst label()
+ {
+ return JmpDst(m_buffer.size());
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ ASSERT(jump.m_offset != -1);
+ void* b = reinterpret_cast<void*>((reinterpret_cast<intptr_t>(code)) + jump.m_offset);
+ return b;
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst label)
+ {
+ void* b = reinterpret_cast<void*>((reinterpret_cast<intptr_t>(code)) + label.m_offset);
+ return b;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst from, JmpDst to)
+ {
+ return to.m_offset - from.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst from, JmpSrc to)
+ {
+ return to.m_offset - from.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpSrc from, JmpDst to)
+ {
+ return to.m_offset - from.m_offset;
+ }
+
+ // Assembler admin methods:
+
+ size_t size() const
+ {
+ return m_buffer.size();
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ void *result = m_buffer.executableCopy(allocator);
+ if (!result)
+ return 0;
+
+ relocateJumps(m_buffer.data(), result);
+ return result;
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ // The return address is after a call and a delay slot instruction
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ ASSERT(to.m_offset != -1);
+ ASSERT(from.m_offset != -1);
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + from.m_offset);
+ MIPSWord* toPos = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + to.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, toPos);
+ }
+
+ static void linkJump(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(from.m_offset != -1);
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, to);
+ }
+
+ static void linkCall(void* code, JmpSrc from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ linkCallInternal(insn, to);
+ }
+
+ static void linkPointer(void* code, JmpDst from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 5)));
+ insn = insn - 6;
+ int flushSize = linkWithOffset(insn, to);
+
+ ExecutableAllocator::cacheFlush(insn, flushSize);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ void* start;
+ int size = linkCallInternal(from, to);
+ if (size == sizeof(MIPSWord))
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 2 * sizeof(MIPSWord));
+ else
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 4 * sizeof(MIPSWord));
+
+ ExecutableAllocator::cacheFlush(start, size);
+ }
+
+ static void repatchInt32(void* from, int32_t to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((to >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (to & 0xffff);
+ insn--;
+ ExecutableAllocator::cacheFlush(insn, 2 * sizeof(MIPSWord));
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ repatchInt32(from, reinterpret_cast<int32_t>(to));
+ }
+
+ static void repatchLoadPtrToLEA(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn = insn + 3;
+ ASSERT((*insn & 0xfc000000) == 0x8c000000); // lw
+ /* lw -> addiu */
+ *insn = 0x24000000 | (*insn & 0x03ffffff);
+
+ ExecutableAllocator::cacheFlush(insn, sizeof(MIPSWord));
+ }
+
+private:
+
+    /* Rewrite each jump in the buffer so it is correct for the new base address. */
+ void relocateJumps(void* oldBase, void* newBase)
+ {
+ // Check each jump
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ int pos = *iter;
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(newBase) + pos);
+ insn = insn + 2;
+ // Need to make sure we have 5 valid instructions after pos
+ if ((unsigned int)pos >= m_buffer.size() - 5 * sizeof(MIPSWord))
+ continue;
+
+ if ((*insn & 0xfc000000) == 0x08000000) { // j
+ int offset = *insn & 0x03ffffff;
+ int oldInsnAddress = (int)insn - (int)newBase + (int)oldBase;
+ int topFourBits = (oldInsnAddress + 4) >> 28;
+ int oldTargetAddress = (topFourBits << 28) | (offset << 2);
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ int newInsnAddress = (int)insn;
+ if (((newInsnAddress + 4) >> 28) == (newTargetAddress >> 28))
+ *insn = 0x08000000 | ((newTargetAddress >> 2) & 0x3ffffff);
+ else {
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ }
+ } else if ((*insn & 0xffe00000) == 0x3c000000) { // lui
+ int high = (*insn & 0xffff) << 16;
+ int low = *(insn + 1) & 0xffff;
+ int oldTargetAddress = high | low;
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ }
+ }
+ }
+
+ static int linkWithOffset(MIPSWord* insn, void* to)
+ {
+ ASSERT((*insn & 0xfc000000) == 0x10000000 // beq
+ || (*insn & 0xfc000000) == 0x14000000 // bne
+ || (*insn & 0xffff0000) == 0x45010000 // bc1t
+ || (*insn & 0xffff0000) == 0x45000000); // bc1f
+ intptr_t diff = (reinterpret_cast<intptr_t>(to)
+ - reinterpret_cast<intptr_t>(insn) - 4) >> 2;
+
+ if (diff < -32768 || diff > 32767 || *(insn + 2) != 0x10000003) {
+ /*
+ Convert the sequence:
+ beq $2, $3, target
+ nop
+ b 1f
+ nop
+ nop
+ nop
+ 1:
+
+ to the new sequence if possible:
+ bne $2, $3, 1f
+ nop
+ j target
+ nop
+ nop
+ nop
+ 1:
+
+ OR to the new sequence:
+ bne $2, $3, 1f
+ nop
+ lui $25, target >> 16
+ ori $25, $25, target & 0xffff
+ jr $25
+ nop
+ 1:
+
+ Note: beq/bne/bc1t/bc1f are converted to bne/beq/bc1f/bc1t.
+ */
+
+ if (*(insn + 2) == 0x10000003) {
+ if ((*insn & 0xfc000000) == 0x10000000) // beq
+ *insn = (*insn & 0x03ff0000) | 0x14000005; // bne
+ else if ((*insn & 0xfc000000) == 0x14000000) // bne
+ *insn = (*insn & 0x03ff0000) | 0x10000005; // beq
+ else if ((*insn & 0xffff0000) == 0x45010000) // bc1t
+ *insn = 0x45000005; // bc1f
+ else if ((*insn & 0xffff0000) == 0x45000000) // bc1f
+ *insn = 0x45010005; // bc1t
+ else
+ ASSERT(0);
+ }
+
+ insn = insn + 2;
+ if ((reinterpret_cast<intptr_t>(insn) + 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *insn = 0x08000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ *(insn + 1) = 0;
+ return 4 * sizeof(MIPSWord);
+ }
+
+ intptr_t newTargetAddress = reinterpret_cast<intptr_t>(to);
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 5 * sizeof(MIPSWord);
+ }
+
+ *insn = (*insn & 0xffff0000) | (diff & 0xffff);
+ return sizeof(MIPSWord);
+ }
+
+ static int linkCallInternal(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn = insn - 4;
+
+ if ((*(insn + 2) & 0xfc000000) == 0x0c000000) { // jal
+ if ((reinterpret_cast<intptr_t>(from) - 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *(insn + 2) = 0x0c000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ return sizeof(MIPSWord);
+ }
+
+ /* lui $25, (to >> 16) & 0xffff */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori $25, $25, to & 0xffff */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ /* jalr $25 */
+ *(insn + 2) = 0x0000f809 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 3 * sizeof(MIPSWord);
+ }
+
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ ASSERT((*(insn + 1) & 0xfc000000) == 0x34000000); // ori
+
+ /* lui */
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = (*(insn + 1) & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ return 2 * sizeof(MIPSWord);
+ }
+
+ AssemblerBuffer m_buffer;
+ Jumps m_jumps;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MIPSAssembler_h
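
One detail worth calling out in the linking code above: a MIPS j/jal instruction supplies only the low 28 bits of the target, with the top four bits taken from the address of the delay-slot instruction. That is why relocateJumps(), linkWithOffset() and linkCallInternal() all compare the top four address bits of the jump site with those of the target and fall back to a lui/ori plus jr/jalr sequence through $t9 when the 256 MB regions differ. A quick worked example (addresses invented):

    branch at 0x0ffffff8, target 0x10000000:  0x0ffffffc >> 28 = 0, 0x10000000 >> 28 = 1  ->  lui/ori/jr $t9
    branch at 0x10000000, target 0x10400000:  both >> 28 give 1                           ->  a single j fits
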
diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h
index 76bd205..ce1be78 100644
--- a/JavaScriptCore/assembler/MacroAssembler.h
+++ b/JavaScriptCore/assembler/MacroAssembler.h
@@ -26,8 +26,6 @@
#ifndef MacroAssembler_h
#define MacroAssembler_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#if CPU(ARM_THUMB2)
@@ -38,6 +36,12 @@ namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+#elif CPU(MIPS)
+#include "MacroAssemblerMIPS.h"
+namespace JSC {
+typedef MacroAssemblerMIPS MacroAssemblerBase;
+};
+
#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
@@ -327,6 +331,11 @@ public:
{
return branchSub32(cond, imm, dest);
}
+ using MacroAssemblerBase::branchTest8;
+ Jump branchTest8(Condition cond, ExtendedAddress address, Imm32 mask = Imm32(-1))
+ {
+ return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
+ }
#endif
};
diff --git a/JavaScriptCore/assembler/MacroAssemblerARM.h b/JavaScriptCore/assembler/MacroAssemblerARM.h
index 21b8de8..40d2e4a 100644
--- a/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2008 Apple Inc.
- * Copyright (C) 2009 University of Szeged
+ * Copyright (C) 2009, 2010 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,8 +28,6 @@
#ifndef MacroAssemblerARM_h
#define MacroAssemblerARM_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "ARMAssembler.h"
@@ -180,6 +178,20 @@ public:
{
m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
}
+
+ void urshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ARMWord w = ARMAssembler::getOp2(0x1f);
+ ASSERT(w != ARMAssembler::INVALID_IMM);
+ m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
+
+ m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
+ }
+
+ void urshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
+ }
void sub32(RegisterID src, RegisterID dest)
{
@@ -214,6 +226,11 @@ public:
m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
+ }
+
void load32(ImplicitAddress address, RegisterID dest)
{
m_assembler.dataTransfer32(true, dest, address.base, address.offset);
@@ -256,6 +273,14 @@ public:
else
m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
}
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= 0)
+ m_assembler.ldrh_u(dest, address.base, ARMAssembler::getOp2Byte(address.offset));
+ else
+ m_assembler.ldrh_d(dest, address.base, ARMAssembler::getOp2Byte(-address.offset));
+ }
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
@@ -359,6 +384,12 @@ public:
move(src, dest);
}
+ Jump branch8(Condition cond, Address left, Imm32 right)
+ {
+ load8(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
{
m_assembler.cmp_r(left, right);
@@ -422,6 +453,12 @@ public:
return m_assembler.jmp(ARMCondition(cond));
}
+ Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ load8(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
ASSERT((cond == Zero) || (cond == NonZero));
@@ -459,7 +496,7 @@ public:
void jump(RegisterID target)
{
- move(target, ARMRegisters::pc);
+ m_assembler.bx(target);
}
void jump(Address address)
@@ -530,6 +567,13 @@ public:
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
+ Jump branchNeg32(Condition cond, RegisterID srcDest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ neg32(srcDest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -544,14 +588,19 @@ public:
Call nearCall()
{
+#if WTF_ARM_ARCH_AT_LEAST(5)
+ ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
+ m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
+ return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
+#else
prepareCall();
return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
+#endif
}
Call call(RegisterID target)
{
- prepareCall();
- move(ARMRegisters::pc, target);
+ m_assembler.blx(target);
JmpSrc jmpSrc;
return Call(jmpSrc, Call::None);
}
@@ -563,7 +612,7 @@ public:
void ret()
{
- m_assembler.mov_r(ARMRegisters::pc, linkRegister);
+ m_assembler.bx(linkRegister);
}
void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
@@ -659,8 +708,14 @@ public:
Call call()
{
+#if WTF_ARM_ARCH_AT_LEAST(5)
+ ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
+ m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
+ return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
+#else
prepareCall();
return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
+#endif
}
Call tailRecursiveCall()
@@ -718,12 +773,17 @@ public:
return false;
}
+ bool supportsFloatingPointSqrt() const
+ {
+ return s_isVFPPresent;
+ }
+
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
m_assembler.doubleTransfer(true, dest, address.base, address.offset);
}
- void loadDouble(void* address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
@@ -779,6 +839,11 @@ public:
mulDouble(ARMRegisters::SD0, dest);
}
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fsqrtd_r(dest, src);
+ }
+
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.fmsr_r(dest, src);
@@ -864,44 +929,56 @@ protected:
void prepareCall()
{
+#if WTF_ARM_ARCH_VERSION < 5
ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
m_assembler.mov_r(linkRegister, ARMRegisters::pc);
+#endif
}
void call32(RegisterID base, int32_t offset)
{
+#if WTF_ARM_ARCH_AT_LEAST(5)
+ int targetReg = ARMRegisters::S1;
+#else
+ int targetReg = ARMRegisters::pc;
+#endif
+ int tmpReg = ARMRegisters::S1;
+
if (base == ARMRegisters::sp)
offset += 4;
if (offset >= 0) {
if (offset <= 0xfff) {
prepareCall();
- m_assembler.dtr_u(true, ARMRegisters::pc, base, offset);
+ m_assembler.dtr_u(true, targetReg, base, offset);
} else if (offset <= 0xfffff) {
- m_assembler.add_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+ m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
prepareCall();
- m_assembler.dtr_u(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
+ m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
} else {
- ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
+ ARMWord reg = m_assembler.getImm(offset, tmpReg);
prepareCall();
- m_assembler.dtr_ur(true, ARMRegisters::pc, base, reg);
+ m_assembler.dtr_ur(true, targetReg, base, reg);
}
} else {
offset = -offset;
if (offset <= 0xfff) {
prepareCall();
- m_assembler.dtr_d(true, ARMRegisters::pc, base, offset);
+ m_assembler.dtr_d(true, targetReg, base, offset);
} else if (offset <= 0xfffff) {
- m_assembler.sub_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+ m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
prepareCall();
- m_assembler.dtr_d(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
+ m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
} else {
- ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
+ ARMWord reg = m_assembler.getImm(offset, tmpReg);
prepareCall();
- m_assembler.dtr_dr(true, ARMRegisters::pc, base, reg);
+ m_assembler.dtr_dr(true, targetReg, base, reg);
}
}
+#if WTF_ARM_ARCH_AT_LEAST(5)
+ m_assembler.blx(targetReg);
+#endif
}
private:
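
On ARMv5 and later, call32() now loads the callee address into the S1 scratch register and branches with BLX, letting the hardware set the link register; the prepareCall()/load-into-pc dance is kept only for older cores. Roughly, for a small positive offset (sketch, not part of the patch itself):

    ldr  s1, [base, #offset]
    blx  s1

versus the pre-ARMv5 sequence:

    mov  lr, pc                  @ lr = address of the next instruction + 4
    ldr  pc, [base, #offset]
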
diff --git a/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index 532a9cf..31a68d9 100644
--- a/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +27,6 @@
#ifndef MacroAssemblerARMv7_h
#define MacroAssemblerARMv7_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#include "ARMv7Assembler.h"
@@ -258,6 +257,21 @@ public:
{
m_assembler.asr(dest, dest, imm.m_value & 0x1f);
}
+
+ void urshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+
+ m_assembler.lsr(dest, dest, dataTempRegister);
+ }
+
+ void urshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
+ }
void sub32(RegisterID src, RegisterID dest)
{
@@ -368,6 +382,20 @@ private:
}
}
+ void load8(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrb(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
void store32(RegisterID src, ArmAddress address)
{
if (address.type == ArmAddress::HasIndex)
@@ -404,6 +432,11 @@ public:
m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
}
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ load8(setupArmAddress(address), dest);
+ }
+
DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
{
DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
@@ -423,6 +456,11 @@ public:
{
m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
}
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.ldrh(dest, address.base, address.offset);
+ }
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
@@ -476,6 +514,11 @@ public:
// In short, FIXME:.
bool supportsFloatingPointTruncate() const { return false; }
+ bool supportsFloatingPointSqrt() const
+ {
+ return false;
+ }
+
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
RegisterID base = address.base;
@@ -539,6 +582,11 @@ public:
mulDouble(fpTempRegister, dest);
}
+ void sqrtDouble(FPRegisterID, FPRegisterID)
+ {
+ ASSERT_NOT_REACHED();
+ }
+
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.vmov(fpTempRegister, src);
@@ -793,6 +841,19 @@ public:
return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
}
+ Jump branch8(Condition cond, RegisterID left, Imm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch8(Condition cond, Address left, Imm32 right)
+ {
+        // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
+ load8(left, addressTempRegister);
+ return branch8(cond, addressTempRegister, right);
+ }
+
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
ASSERT((cond == Zero) || (cond == NonZero));
@@ -823,6 +884,21 @@ public:
return branchTest32(cond, addressTempRegister, mask);
}
+ Jump branchTest8(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ test32(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
+ load8(address, addressTempRegister);
+ return branchTest8(cond, addressTempRegister, mask);
+ }
+
Jump jump()
{
return Jump(makeJump());
@@ -973,6 +1049,14 @@ public:
m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
}
+ void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ load8(address, dataTempRegister);
+ test32(dataTempRegister, mask);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
{
diff --git a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index cae8bf6..543b0fa 100644
--- a/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -26,8 +26,6 @@
#ifndef MacroAssemblerCodeRef_h
#define MacroAssemblerCodeRef_h
-#include <wtf/Platform.h>
-
#include "ExecutableAllocator.h"
#include "PassRefPtr.h"
#include "RefPtr.h"
diff --git a/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/JavaScriptCore/assembler/MacroAssemblerMIPS.h
new file mode 100644
index 0000000..9853c34
--- /dev/null
+++ b/JavaScriptCore/assembler/MacroAssemblerMIPS.h
@@ -0,0 +1,1711 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerMIPS_h
+#define MacroAssemblerMIPS_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AbstractMacroAssembler.h"
+#include "MIPSAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler> {
+public:
+
+ MacroAssemblerMIPS()
+ : m_fixedWidth(false)
+ {
+ }
+
+ static const Scale ScalePtr = TimesFour;
+
+ // For storing immediate number
+ static const RegisterID immTempRegister = MIPSRegisters::t0;
+ // For storing data loaded from the memory
+ static const RegisterID dataTempRegister = MIPSRegisters::t1;
+ // For storing address base
+ static const RegisterID addrTempRegister = MIPSRegisters::t2;
+ // For storing compare result
+ static const RegisterID cmpTempRegister = MIPSRegisters::t3;
+
+ // FP temp register
+ static const FPRegisterID fpTempRegister = MIPSRegisters::f16;
+
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual,
+ Overflow,
+ Signed,
+ Zero,
+ NonZero
+ };
+
+ enum DoubleCondition {
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ static const RegisterID stackPointerRegister = MIPSRegisters::sp;
+ static const RegisterID returnAddressRegister = MIPSRegisters::ra;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+ // For many operations the source may be an Imm32, the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+ // object).
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addu(dest, dest, src);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_isPointer && imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth) {
+ /*
+ addiu dest, src, imm
+ */
+ m_assembler.addiu(dest, src, imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ addu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.addu(dest, src, immTempRegister);
+ }
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ li immTemp, imm
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, address.base, address.offset);
+ if (!imm.m_isPointer
+ && imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister,
+ imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister,
+ immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ li immtemp, imm
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister,
+ imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister,
+ immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+ }
+ }
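
In the large-offset path above, the (offset + 0x8000) >> 16 term compensates for lw/sw sign-extending their 16-bit displacement: adding 0x8000 before taking the high half rounds it up exactly when the low half will later be treated as negative. A worked example with offset = 0x18000 (value invented for illustration):

    lui  addrTemp, 2                 # (0x18000 + 0x8000) >> 16
    addu addrTemp, addrTemp, base    # addrTemp = base + 0x20000
    lw   dataTemp, 0x8000(addrTemp)  # 0x8000 sign-extends to -0x8000, so the
                                     # effective address is base + 0x18000
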
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(RegisterID src, Address dest)
+ {
+ if (dest.offset >= -32768 && dest.offset <= 32767 && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ addu dataTemp, dataTemp, src
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, dest.base, dest.offset);
+ m_assembler.addu(dataTempRegister, dataTempRegister, src);
+ m_assembler.sw(dataTempRegister, dest.base, dest.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ addu dataTemp, dataTemp, src
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (dest.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, dest.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, dest.offset);
+ m_assembler.addu(dataTempRegister, dataTempRegister, src);
+ m_assembler.sw(dataTempRegister, addrTempRegister, dest.offset);
+ }
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ /*
+ li addrTemp, address
+ li immTemp, imm
+ lw dataTemp, 0(addrTemp)
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, 0(addrTemp)
+ */
+ move(ImmPtr(address.m_ptr), addrTempRegister);
+ m_assembler.lw(dataTempRegister, addrTempRegister, 0);
+ if (!imm.m_isPointer && imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andInsn(dest, dest, src);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ if (!imm.m_isPointer && !imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (!imm.m_isPointer && imm.m_value > 0 && imm.m_value < 65535
+ && !m_fixedWidth)
+ m_assembler.andi(dest, dest, imm.m_value);
+ else {
+ /*
+ li immTemp, imm
+ and dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.andInsn(dest, dest, immTempRegister);
+ }
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.sll(dest, dest, imm.m_value);
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.sllv(dest, dest, shiftAmount);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mul(dest, dest, src);
+ }
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_isPointer && !imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (!imm.m_isPointer && imm.m_value == 1 && !m_fixedWidth)
+ move(src, dest);
+ else {
+ /*
+ li dataTemp, imm
+ mul dest, src, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.mul(dest, src, dataTempRegister);
+ }
+ }
+
+ void not32(RegisterID srcDest)
+ {
+ m_assembler.nor(srcDest, srcDest, MIPSRegisters::zero);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orInsn(dest, dest, src);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ if (!imm.m_isPointer && !imm.m_value && !m_fixedWidth)
+ return;
+
+ if (!imm.m_isPointer && imm.m_value > 0 && imm.m_value < 65535
+ && !m_fixedWidth) {
+ m_assembler.ori(dest, dest, imm.m_value);
+ return;
+ }
+
+ /*
+ li dataTemp, imm
+ or dest, dest, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.orInsn(dest, dest, dataTempRegister);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srav(dest, dest, shiftAmount);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.sra(dest, dest, imm.m_value);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subu(dest, dest, src);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ if (!imm.m_isPointer && imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ /*
+ addiu dest, dest, -imm
+ */
+ m_assembler.addiu(dest, dest, -imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ subu dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.subu(dest, dest, immTempRegister);
+ }
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ li immTemp, imm
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, address.base, address.offset);
+ if (!imm.m_isPointer
+ && imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister,
+ -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister,
+ immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ li immTemp, imm
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+ if (!imm.m_isPointer
+ && imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister,
+ -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister,
+ immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+ }
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ /*
+ li addrTemp, address
+ li immTemp, imm
+ lw dataTemp, 0(addrTemp)
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, 0(addrTemp)
+ */
+ move(ImmPtr(address.m_ptr), addrTempRegister);
+ m_assembler.lw(dataTempRegister, addrTempRegister, 0);
+
+ if (!imm.m_isPointer && imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ m_assembler.addiu(dataTempRegister, dataTempRegister,
+ -imm.m_value);
+ } else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorInsn(dest, dest, src);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ /*
+ li immTemp, imm
+ xor dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(dest, dest, immTempRegister);
+ }
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be an Imm32. Address
+ // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
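+ //
+ // Exposition-only example (not part of the original change):
+ //     masm.load32(Address(MIPSRegisters::a0, 4), MIPSRegisters::v0);
+ // emits a single lw when the offset fits in 16 bits, and the
+ // lui/addu/lw sequence otherwise (or whenever m_fixedWidth is set).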
+
+ /* Need to use zero-extended load byte for load8. */
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lbu(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lbu dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lw(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lw dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lw dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister,
+ immTempRegister);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32764
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ (Big-Endian)
+ lwl dest, address.offset(addrTemp)
+ lwr dest, address.offset+3(addrTemp)
+ (Little-Endian)
+ lwl dest, address.offset+3(addrTemp)
+ lwr dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+#if CPU(BIG_ENDIAN)
+ m_assembler.lwl(dest, addrTempRegister, address.offset);
+ m_assembler.lwr(dest, addrTempRegister, address.offset + 3);
+#else
+ m_assembler.lwl(dest, addrTempRegister, address.offset + 3);
+ m_assembler.lwr(dest, addrTempRegister, address.offset);
+
+#endif
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, address.offset >> 16
+ ori immTemp, immTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, immTemp
+ (Big-Endian)
+ lwl dest, 0(addrTemp)
+ lwr dest, 3(addrTemp)
+ (Little-Endian)
+ lwl dest, 3(addrTemp)
+ lwr dest, 0(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, address.offset >> 16);
+ m_assembler.ori(immTempRegister, immTempRegister, address.offset);
+ m_assembler.addu(addrTempRegister, addrTempRegister,
+ immTempRegister);
+#if CPU(BIG_ENDIAN)
+ m_assembler.lwl(dest, addrTempRegister, 0);
+ m_assembler.lwr(dest, addrTempRegister, 3);
+#else
+ m_assembler.lwl(dest, addrTempRegister, 3);
+ m_assembler.lwr(dest, addrTempRegister, 0);
+#endif
+ }
+ }
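+
+ // Exposition-only note (not part of the original change): lwl/lwr above
+ // form the classic MIPS unaligned-load idiom. On a little-endian core,
+ // lwr copies the bytes from the start of the value up to the end of its
+ // aligned word into the low end of dest, and lwl supplies the remaining
+ // high-order bytes (ending at offset + 3), so the pair rebuilds the
+ // 32-bit value even when it straddles a word boundary.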
+
+ void load32(void* address, RegisterID dest)
+ {
+ /*
+ li addrTemp, address
+ lw dest, 0(addrTemp)
+ */
+ move(ImmPtr(address), addrTempRegister);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ lw dest, 0(addrTemp)
+ */
+ DataLabel32 dataLabel(this);
+ move(Imm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ lw dest, 0(addrTemp)
+ */
+ Label label(this);
+ move(Imm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return label;
+ }
+
+ Label loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return loadPtrWithPatchToLEA(address, dest);
+ }
+
+ /* Need to use zero-extended load half-word for load16. */
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lhu dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lhu dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister,
+ immTempRegister);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ sw src, 0(addrTemp)
+ */
+ DataLabel32 dataLabel(this);
+ move(Imm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.sw(src, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sw src, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sw src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sw src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister,
+ immTempRegister);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ if (!imm.m_isPointer && !imm.m_value)
+ m_assembler.sw(MIPSRegisters::zero, address.base,
+ address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, address.base, address.offset);
+ }
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sw immTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ if (!imm.m_isPointer && !imm.m_value && !m_fixedWidth)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister,
+ address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister,
+ address.offset);
+ }
+ }
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ /*
+ li addrTemp, address
+ sw src, 0(addrTemp)
+ */
+ move(ImmPtr(address), addrTempRegister);
+ m_assembler.sw(src, addrTempRegister, 0);
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ /*
+ li immTemp, imm
+ li addrTemp, address
+ sw immTemp, 0(addrTemp)
+ */
+ if (!imm.m_isPointer && !imm.m_value && !m_fixedWidth) {
+ move(ImmPtr(address), addrTempRegister);
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, 0);
+ } else {
+ move(imm, immTempRegister);
+ move(ImmPtr(address), addrTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, 0);
+ }
+ }
+
+ // Floating-point operations:
+
+ bool supportsFloatingPoint() const
+ {
+#if WTF_MIPS_DOUBLE_FLOAT
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ bool supportsFloatingPointTruncate() const
+ {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
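+ //
+ // Exposition-only note (not part of the original change): on this port a
+ // push expands to "addiu sp, sp, -4; sw src, 0(sp)" and pop to the mirror
+ // "lw dest, 0(sp); addiu sp, sp, 4", so the stack moves in 4-byte word
+ // units and grows towards lower addresses.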
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.lw(dest, MIPSRegisters::sp, 0);
+ m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4);
+ m_assembler.sw(src, MIPSRegisters::sp, 0);
+ }
+
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void push(Imm32 imm)
+ {
+ move(imm, immTempRegister);
+ push(immTempRegister);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ if (!imm.m_isPointer && !imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_isPointer || m_fixedWidth) {
+ m_assembler.lui(dest, imm.m_value >> 16);
+ m_assembler.ori(dest, dest, imm.m_value);
+ } else
+ m_assembler.li(dest, imm.m_value);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ m_assembler.move(dest, src);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, immTempRegister);
+ move(reg2, reg1);
+ move(immTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ move(src, dest);
+ }
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations returns a Jump
+ // object which may be linked at a later point, allowing forward jumps,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+ // respectively; for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+ // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
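+ //
+ // Exposition-only example (not part of the original change):
+ //     Jump j = masm.branch32(Below, MIPSRegisters::a0, Imm32(16));
+ // moves 16 into immTemp, emits "sltu cmpTemp, a0, immTemp" followed by a
+ // bne against $zero, and returns a Jump that can be linked later.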
+
+ Jump branch8(Condition cond, Address left, Imm32 right)
+ {
+ // Make sure the immediate value is unsigned 8 bits.
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(Condition cond, RegisterID left, RegisterID right)
+ {
+ if (cond == Equal || cond == Zero)
+ return branchEqual(left, right);
+ if (cond == NotEqual || cond == NonZero)
+ return branchNotEqual(left, right);
+ if (cond == Above) {
+ m_assembler.sltu(cmpTempRegister, right, left);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == AboveOrEqual) {
+ m_assembler.sltu(cmpTempRegister, left, right);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Below) {
+ m_assembler.sltu(cmpTempRegister, left, right);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == BelowOrEqual) {
+ m_assembler.sltu(cmpTempRegister, right, left);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == GreaterThan) {
+ m_assembler.slt(cmpTempRegister, right, left);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == GreaterThanOrEqual) {
+ m_assembler.slt(cmpTempRegister, left, right);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == LessThan) {
+ m_assembler.slt(cmpTempRegister, left, right);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == LessThanOrEqual) {
+ m_assembler.slt(cmpTempRegister, right, left);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Overflow) {
+ /*
+ xor cmpTemp, left, right
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ subu cmpTemp, left, right
+ xor cmpTemp, cmpTemp, left
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ m_assembler.xorInsn(cmpTempRegister, left, right);
+ m_assembler.bgez(cmpTempRegister, 11);
+ m_assembler.nop();
+ m_assembler.subu(cmpTempRegister, left, right);
+ m_assembler.xorInsn(cmpTempRegister, cmpTempRegister, left);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ m_assembler.subu(cmpTempRegister, left, right);
+ // Check if the result is negative.
+ m_assembler.slt(cmpTempRegister, cmpTempRegister,
+ MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+
+ return Jump();
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Imm32 right)
+ {
+ move(right, immTempRegister);
+ return branch32(cond, left, immTempRegister);
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(Condition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, Address left, Imm32 right)
+ {
+ load32(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load32(left, dataTempRegister);
+ // The load32() above may clobber immTempRegister, so the move() of
+ // the immediate must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load32WithUnalignedHalfWords(left, dataTempRegister);
+ // The load32WithUnalignedHalfWords() above may clobber
+ // immTempRegister, so the move() of the immediate must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ {
+ load16(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFF0000));
+ load16(left, dataTempRegister);
+ // The load16() above may clobber immTempRegister, so the move() of
+ // the immediate must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.andInsn(cmpTempRegister, reg, mask);
+ if (cond == Zero)
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ return branchEqual(reg, MIPSRegisters::zero);
+ return branchNotEqual(reg, MIPSRegisters::zero);
+ }
+ move(mask, immTempRegister);
+ return branchTest32(cond, reg, immTempRegister);
+ }
+
+ Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ load32(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ load32(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ load8(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump jump()
+ {
+ return branchEqual(MIPSRegisters::zero, MIPSRegisters::zero);
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.jr(target);
+ m_assembler.nop();
+ }
+
+ void jump(Address address)
+ {
+ m_fixedWidth = true;
+ load32(address, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ }
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
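+ //
+ // Exposition-only note (not part of the original change): MIPS has no
+ // overflow flag, so the Overflow paths below use the two's-complement
+ // sign test: an addition can only overflow when both operands share a
+ // sign and the result's sign differs from it, while a subtraction can
+ // only overflow when the operands' signs differ; the xor/bgez (or bltz)
+ // pairs encode exactly that check.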
+
+ Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, dest
+ xor cmpTemp, dataTemp, src
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addu dest, dataTemp, src
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(dest, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dest, dataTempRegister, src);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ add32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ add32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ add32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchAdd32(cond, immTempRegister, dest);
+ }
+
+ Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ mult src, dest
+ mfhi dataTemp
+ mflo dest
+ sra addrTemp, dest, 31
+ beq dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ m_assembler.mult(src, dest);
+ m_assembler.mfhi(dataTempRegister);
+ m_assembler.mflo(dest);
+ m_assembler.sra(addrTempRegister, dest, 31);
+ m_assembler.beq(dataTempRegister, addrTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ mul32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ mul32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ mul32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ move(src, dest);
+ return branchMul32(cond, immTempRegister, dest);
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, dest
+ xor cmpTemp, dataTemp, src
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ subu dest, dataTemp, src
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(dest, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+ m_assembler.bgez(cmpTempRegister, 10);
+ m_assembler.subu(dest, dataTempRegister, src);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ sub32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ sub32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ sub32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchSub32(cond, immTempRegister, dest);
+ }
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.bkpt();
+ }
+
+ Call nearCall()
+ {
+ /* We need two words for relaxation. */
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.jal();
+ m_assembler.nop();
+ return Call(m_assembler.newJmpSrc(), Call::LinkableNear);
+ }
+
+ Call call()
+ {
+ m_assembler.lui(MIPSRegisters::t9, 0);
+ m_assembler.ori(MIPSRegisters::t9, MIPSRegisters::t9, 0);
+ m_assembler.jalr(MIPSRegisters::t9);
+ m_assembler.nop();
+ return Call(m_assembler.newJmpSrc(), Call::Linkable);
+ }
+
+ Call call(RegisterID target)
+ {
+ m_assembler.jalr(target);
+ m_assembler.nop();
+ return Call(m_assembler.newJmpSrc(), Call::None);
+ }
+
+ Call call(Address address)
+ {
+ m_fixedWidth = true;
+ load32(address, MIPSRegisters::t9);
+ m_assembler.jalr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ return Call(m_assembler.newJmpSrc(), Call::None);
+ }
+
+ void ret()
+ {
+ m_assembler.jr(MIPSRegisters::ra);
+ m_assembler.nop();
+ }
+
+ void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ if (cond == Equal || cond == Zero) {
+ m_assembler.xorInsn(dest, left, right);
+ m_assembler.sltiu(dest, dest, 1);
+ } else if (cond == NotEqual || cond == NonZero) {
+ m_assembler.xorInsn(dest, left, right);
+ m_assembler.sltu(dest, MIPSRegisters::zero, dest);
+ } else if (cond == Above)
+ m_assembler.sltu(dest, right, left);
+ else if (cond == AboveOrEqual) {
+ m_assembler.sltu(dest, left, right);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == Below)
+ m_assembler.sltu(dest, left, right);
+ else if (cond == BelowOrEqual) {
+ m_assembler.sltu(dest, right, left);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == GreaterThan)
+ m_assembler.slt(dest, right, left);
+ else if (cond == GreaterThanOrEqual) {
+ m_assembler.slt(dest, left, right);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == LessThan)
+ m_assembler.slt(dest, left, right);
+ else if (cond == LessThanOrEqual) {
+ m_assembler.slt(dest, right, left);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == Overflow) {
+ /*
+ xor cmpTemp, left, right
+ bgez cmpTemp, Done # same sign bit -> no overflow
+ move dest, 0
+ subu cmpTemp, left, right
+ xor cmpTemp, cmpTemp, left # diff sign bit -> overflow
+ slt dest, cmpTemp, 0
+ Done:
+ */
+ m_assembler.xorInsn(cmpTempRegister, left, right);
+ m_assembler.bgez(cmpTempRegister, 4);
+ m_assembler.move(dest, MIPSRegisters::zero);
+ m_assembler.subu(cmpTempRegister, left, right);
+ m_assembler.xorInsn(cmpTempRegister, cmpTempRegister, left);
+ m_assembler.slt(dest, cmpTempRegister, MIPSRegisters::zero);
+ } else if (cond == Signed) {
+ m_assembler.subu(dest, left, right);
+ // Check if the result is negative.
+ m_assembler.slt(dest, dest, MIPSRegisters::zero);
+ }
+ }
+
+ void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ move(right, immTempRegister);
+ set32(cond, left, immTempRegister, dest);
+ }
+
+ void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ load8(address, dataTempRegister);
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ m_assembler.sltiu(dest, dataTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
+ } else {
+ move(mask, immTempRegister);
+ m_assembler.andInsn(cmpTempRegister, dataTempRegister,
+ immTempRegister);
+ if (cond == Zero)
+ m_assembler.sltiu(dest, cmpTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
+ }
+ }
+
+ void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ load32(address, dataTempRegister);
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ m_assembler.sltiu(dest, dataTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
+ } else {
+ move(mask, immTempRegister);
+ m_assembler.andInsn(cmpTempRegister, dataTempRegister,
+ immTempRegister);
+ if (cond == Zero)
+ m_assembler.sltiu(dest, cmpTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
+ }
+ }
+
+ DataLabel32 moveWithPatch(Imm32 imm, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ DataLabel32 label(this);
+ move(imm, dest);
+ m_fixedWidth = false;
+ return label;
+ }
+
+ DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ DataLabelPtr label(this);
+ move(initialValue, dest);
+ m_fixedWidth = false;
+ return label;
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ m_fixedWidth = true;
+ dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+ Jump temp = branch32(cond, left, immTempRegister);
+ m_fixedWidth = false;
+ return temp;
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ m_fixedWidth = true;
+ load32(left, dataTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+ Jump temp = branch32(cond, dataTempRegister, immTempRegister);
+ m_fixedWidth = false;
+ return temp;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ m_fixedWidth = true;
+ DataLabelPtr dataLabel = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(ImmPtr(0), address);
+ }
+
+ Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't update the return address register.
+ m_fixedWidth = true;
+ move(Imm32(0), MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ return Call(m_assembler.newJmpSrc(), Call::Linkable);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+#if WTF_MIPS_ISA(1)
+ /*
+ li addrTemp, address.offset
+ addu addrTemp, addrTemp, base
+ lwc1 dest, 0(addrTemp)
+ lwc1 dest+1, 4(addrTemp)
+ */
+ move(Imm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, 0);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ m_assembler.ldc1(dest, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ ldc1 dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+#if WTF_MIPS_ISA(1)
+ /*
+ li addrTemp, address.offset
+ addu addrTemp, addrTemp, base
+ swc1 src, 0(addrTemp)
+ swc1 src+1, 4(addrTemp)
+ */
+ move(Imm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, 0);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.sdc1(src, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sdc1 src, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.addd(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.addd(dest, dest, fpTempRegister);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.subd(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.subd(dest, dest, fpTempRegister);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.muld(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.muld(dest, dest, fpTempRegister);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.mtc1(src, fpTempRegister);
+ m_assembler.cvtdw(dest, fpTempRegister);
+ }
+
+ void insertRelaxationWords()
+ {
+ /* We need four words for relaxation. */
+ m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 3); // Jump over nops;
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.nop();
+ }
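+
+ // Exposition-only note (not part of the original change): the reserved
+ // nop slots above appear to give the link step room to rewrite a short
+ // PC-relative branch into a longer absolute-jump sequence when the final
+ // target falls outside the 16-bit branch displacement range.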
+
+ Jump branchTrue()
+ {
+ m_assembler.appendJump();
+ m_assembler.bc1t();
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.newJmpSrc());
+ }
+
+ Jump branchFalse()
+ {
+ m_assembler.appendJump();
+ m_assembler.bc1f();
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.newJmpSrc());
+ }
+
+ Jump branchEqual(RegisterID rs, RegisterID rt)
+ {
+ m_assembler.appendJump();
+ m_assembler.beq(rs, rt, 0);
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.newJmpSrc());
+ }
+
+ Jump branchNotEqual(RegisterID rs, RegisterID rt)
+ {
+ m_assembler.appendJump();
+ m_assembler.bne(rs, rt, 0);
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.newJmpSrc());
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ if (cond == DoubleEqual) {
+ m_assembler.ceqd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleNotEqual) {
+ m_assembler.ceqd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThan) {
+ m_assembler.cngtd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThanOrEqual) {
+ m_assembler.cnged(right, left);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleLessThan) {
+ m_assembler.cltd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleLessThanOrEqual) {
+ m_assembler.cled(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ m_assembler.cueqd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleGreaterThanOrUnordered) {
+ m_assembler.coled(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThanOrEqualOrUnordered) {
+ m_assembler.coltd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleLessThanOrUnordered) {
+ m_assembler.cultd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleLessThanOrEqualOrUnordered) {
+ m_assembler.culed(left, right);
+ return branchTrue();
+ }
+ ASSERT(0);
+
+ return Jump();
+ }
+
+ // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32-bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MAX 0x7fffffff).
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ return branch32(Equal, dest, Imm32(0x7fffffff));
+ }
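+
+ // Exposition-only note (not part of the original change): trunc.w.d
+ // produces 0x7fffffff for NaN and out-of-range inputs on MIPS, so
+ // comparing against 0x7fffffff catches every failed conversion at the
+ // cost of also branching for the one in-range input that truncates to
+ // INT_MAX, as the comment above allows.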
+
+private:
+ // If m_fixedWidth is true, we will generate a fixed number of instructions.
+ // Otherwise, we can emit any number of instructions.
+ bool m_fixedWidth;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ MIPSAssembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+};
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MacroAssemblerMIPS_h
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86.h b/JavaScriptCore/assembler/MacroAssemblerX86.h
index ca7c31a..0918996 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -26,8 +26,6 @@
#ifndef MacroAssemblerX86_h
#define MacroAssemblerX86_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER) && CPU(X86)
#include "MacroAssemblerX86Common.h"
@@ -89,7 +87,7 @@ public:
m_assembler.movl_mr(address, dest);
}
- void loadDouble(void* address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
ASSERT(isSSE2Present());
m_assembler.movsd_mr(address, dest);
@@ -174,6 +172,7 @@ public:
bool supportsFloatingPoint() const { return m_isSSE2Present; }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
bool supportsFloatingPointTruncate() const { return m_isSSE2Present; }
+ bool supportsFloatingPointSqrt() const { return m_isSSE2Present; }
private:
const bool m_isSSE2Present;
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index 449df86..7296193 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -26,8 +26,6 @@
#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#include "X86Assembler.h"
@@ -251,6 +249,33 @@ public:
{
m_assembler.sarl_i8r(imm.m_value, dest);
}
+
+ void urshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+ // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ if (shift_amount != X86Registers::ecx) {
+ swap(shift_amount, X86Registers::ecx);
+
+ // E.g. transform "shrl %eax, %eax" -> "xchgl %eax, %ecx; shrl %ecx, %ecx; xchgl %eax, %ecx"
+ if (dest == shift_amount)
+ m_assembler.shrl_CLr(X86Registers::ecx);
+ // E.g. transform "shrl %eax, %ecx" -> "xchgl %eax, %ecx; shrl %ecx, %eax; xchgl %eax, %ecx"
+ else if (dest == X86Registers::ecx)
+ m_assembler.shrl_CLr(shift_amount);
+ // E.g. transform "shrl %eax, %ebx" -> "xchgl %eax, %ecx; shrl %ecx, %ebx; xchgl %eax, %ecx"
+ else
+ m_assembler.shrl_CLr(dest);
+
+ swap(shift_amount, X86Registers::ecx);
+ } else
+ m_assembler.shrl_CLr(dest);
+ }
+
+ void urshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.shrl_i8r(imm.m_value, dest);
+ }
void sub32(RegisterID src, RegisterID dest)
{
@@ -303,6 +328,10 @@ public:
m_assembler.xorl_mr(src.offset, src.base, dest);
}
+ void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.sqrtsd_rr(src, dst);
+ }
// Memory access operations:
//
@@ -336,6 +365,11 @@ public:
{
m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
}
+
+ void load16(Address address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, dest);
+ }
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
@@ -620,6 +654,12 @@ public:
// an optional second operand of a mask under which to perform the test.
public:
+ Jump branch8(Condition cond, Address left, Imm32 right)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
Jump branch32(Condition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpl_rr(right, left);
@@ -717,6 +757,26 @@ public:
m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+
+ Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest8(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
Jump jump()
{
@@ -836,6 +896,13 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+ Jump branchNeg32(Condition cond, RegisterID srcDest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ neg32(srcDest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -917,10 +984,11 @@ public:
void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
{
if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base);
+ m_assembler.cmpb_im(0, address.offset, address.base);
else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
}
void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
diff --git a/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index ec93f8c..168c93f 100644
--- a/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -26,8 +26,6 @@
#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER) && CPU(X86_64)
#include "MacroAssemblerX86Common.h"
@@ -88,7 +86,7 @@ public:
}
}
- void loadDouble(void* address, FPRegisterID dest)
+ void loadDouble(const void* address, FPRegisterID dest)
{
move(ImmPtr(address), scratchRegister);
loadDouble(scratchRegister, dest);
@@ -411,6 +409,14 @@ public:
return label;
}
+ using MacroAssemblerX86Common::branchTest8;
+ Jump branchTest8(Condition cond, ExtendedAddress address, Imm32 mask = Imm32(-1))
+ {
+ ImmPtr addr(reinterpret_cast<void*>(address.offset));
+ MacroAssemblerX86Common::move(addr, scratchRegister);
+ return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
+ }
+
Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
{
Label label(this);
@@ -421,6 +427,7 @@ public:
bool supportsFloatingPoint() const { return true; }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
bool supportsFloatingPointTruncate() const { return true; }
+ bool supportsFloatingPointSqrt() const { return true; }
private:
friend class LinkBuffer;
diff --git a/JavaScriptCore/assembler/RepatchBuffer.h b/JavaScriptCore/assembler/RepatchBuffer.h
index 89cbf06..72cf6b2 100644
--- a/JavaScriptCore/assembler/RepatchBuffer.h
+++ b/JavaScriptCore/assembler/RepatchBuffer.h
@@ -26,8 +26,6 @@
#ifndef RepatchBuffer_h
#define RepatchBuffer_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#include <MacroAssembler.h>
diff --git a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h
index ab3d05f..20d72f5 100644
--- a/JavaScriptCore/assembler/X86Assembler.h
+++ b/JavaScriptCore/assembler/X86Assembler.h
@@ -26,8 +26,6 @@
#ifndef X86Assembler_h
#define X86Assembler_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
#include "AssemblerBuffer.h"
@@ -130,6 +128,7 @@ private:
PRE_SSE_66 = 0x66,
OP_PUSH_Iz = 0x68,
OP_IMUL_GvEvIz = 0x69,
+ OP_GROUP1_EbIb = 0x80,
OP_GROUP1_EvIz = 0x81,
OP_GROUP1_EvIb = 0x83,
OP_TEST_EvGv = 0x85,
@@ -168,6 +167,7 @@ private:
OP2_MULSD_VsdWsd = 0x59,
OP2_SUBSD_VsdWsd = 0x5C,
OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_SQRTSD_VsdWsd = 0x51,
OP2_XORPD_VpdWpd = 0x57,
OP2_MOVD_VdEd = 0x6E,
OP2_MOVD_EdVd = 0x7E,
@@ -201,6 +201,7 @@ private:
GROUP1A_OP_POP = 0,
GROUP2_OP_SHL = 4,
+ GROUP2_OP_SHR = 5,
GROUP2_OP_SAR = 7,
GROUP3_OP_TEST = 0,
@@ -673,6 +674,21 @@ public:
{
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
}
+
+ void shrl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shrl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
+ }
void shll_i8r(int imm, RegisterID dst)
{
@@ -760,7 +776,7 @@ public:
m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
m_formatter.immediate32(imm);
}
-
+
void cmpl_im(int imm, int offset, RegisterID base)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -771,6 +787,18 @@ public:
m_formatter.immediate32(imm);
}
}
+
+ void cmpb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
@@ -890,6 +918,18 @@ public:
m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
m_formatter.immediate32(imm);
}
+
+ void testb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
@@ -1370,7 +1410,7 @@ public:
}
#if !CPU(X86_64)
- void movsd_mr(void* address, XMMRegisterID dst)
+ void movsd_mr(const void* address, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
@@ -1438,6 +1478,12 @@ public:
m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
}
+ void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
// Misc instructions:
void int3()
@@ -1723,7 +1769,7 @@ private:
}
#if !CPU(X86_64)
- void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
@@ -2034,7 +2080,7 @@ private:
}
#if !CPU(X86_64)
- void memoryModRM(int reg, void* address)
+ void memoryModRM(int reg, const void* address)
{
// noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
putModRm(ModRmMemoryNoDisp, reg, noBase);