Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h |   73
-rw-r--r--  JavaScriptCore/jit/JIT.cpp               |  564
-rw-r--r--  JavaScriptCore/jit/JIT.h                 |  661
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp     | 1316
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp           |  472
-rw-r--r--  JavaScriptCore/jit/JITCode.h             |   13
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h    |  703
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp        | 2022
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp | 1144
-rw-r--r--  JavaScriptCore/jit/JITStubCall.h         |  144
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp          |  959
-rw-r--r--  JavaScriptCore/jit/JITStubs.h            |  211
12 files changed, 6464 insertions(+), 1818 deletions(-)
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index a545b0c..4ed47e3 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -156,7 +156,7 @@ public:
return pool.release();
}
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) || !(PLATFORM(X86) || PLATFORM(X86_64))
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
static void makeWritable(void* start, size_t size)
{
reprotectRegion(start, size, Writable);
@@ -165,58 +165,47 @@ public:
static void makeExecutable(void* start, size_t size)
{
reprotectRegion(start, size, Executable);
- cacheFlush(start, size);
}
-
- // If ASSEMBLER_WX_EXCLUSIVE protection is turned on, or on non-x86 platforms,
- // we need to track start & size so we can makeExecutable/cacheFlush at the end.
- class MakeWritable {
- public:
- MakeWritable(void* start, size_t size)
- : m_start(start)
- , m_size(size)
- {
- makeWritable(start, size);
- }
-
- ~MakeWritable()
- {
- makeExecutable(m_start, m_size);
- }
-
- private:
- void* m_start;
- size_t m_size;
- };
#else
static void makeWritable(void*, size_t) {}
static void makeExecutable(void*, size_t) {}
-
- // On x86, without ASSEMBLER_WX_EXCLUSIVE, there is nothing to do here.
- class MakeWritable { public: MakeWritable(void*, size_t) {} };
#endif
-private:
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) || !(PLATFORM(X86) || PLATFORM(X86_64))
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- static void reprotectRegion(void*, size_t, ProtectionSeting);
-#else
- static void reprotectRegion(void*, size_t, ProtectionSeting) {}
-#endif
- static void cacheFlush(void* code, size_t size)
- {
#if PLATFORM(X86) || PLATFORM(X86_64)
- UNUSED_PARAM(code);
- UNUSED_PARAM(size);
-#elif PLATFORM(ARM_V7) && PLATFORM(IPHONE)
+ static void cacheFlush(void*, size_t)
+ {
+ }
+#elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
+ static void cacheFlush(void* code, size_t size)
+ {
sys_dcache_flush(code, size);
sys_icache_invalidate(code, size);
-#else
-#error "ExecutableAllocator::cacheFlush not implemented on this platform."
-#endif
}
+#elif PLATFORM(ARM)
+ static void cacheFlush(void* code, size_t size)
+ {
+ #if COMPILER(GCC) && (GCC_VERSION >= 30406)
+ __clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(code) + size);
+ #else
+ const int syscall = 0xf0002;
+ __asm __volatile (
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, %2\n"
+ "mov r2, #0x0\n"
+ "swi 0x00000000\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size), "r" (syscall)
+ : "r0", "r1", "r7");
+ #endif // COMPILER(GCC) && (GCC_VERSION >= 30406)
+ }
+#endif
+
+private:
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void reprotectRegion(void*, size_t, ProtectionSeting);
#endif
RefPtr<ExecutablePool> m_smallAllocationPool;
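
The hunk above separates two concerns: makeWritable/makeExecutable toggle W^X page protection, while cacheFlush handles instruction-cache maintenance, which each platform now implements for itself. For orientation, here is a minimal sketch of the discipline these primitives combine into, assuming a page-aligned region, POSIX mprotect(), and the GCC/Clang __builtin___clear_cache() builtin rather than the allocator's real internals:

    // Sketch only: emit machine code under a W^X policy, then flush caches.
    // Assumes 'start' is page-aligned and 'size' is a multiple of page size.
    #include <sys/mman.h>
    #include <cstddef>
    #include <cstring>

    static void emitCode(void* start, size_t size, const void* bytes)
    {
        // makeWritable(): drop execute permission, allow writes.
        mprotect(start, size, PROT_READ | PROT_WRITE);

        memcpy(start, bytes, size); // write the freshly generated code

        // makeExecutable(): drop write, allow execution; never both at once.
        mprotect(start, size, PROT_READ | PROT_EXEC);

        // cacheFlush(): on ARM the data and instruction caches are not
        // coherent, so the icache must be invalidated before jumping to the
        // new code. On x86/x86_64 this is a no-op, which is why the x86
        // cacheFlush() above has an empty body.
        __builtin___clear_cache(static_cast<char*>(start),
                                static_cast<char*>(start) + size);
    }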
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index 0cfb535..0d6d1b8 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -1,4 +1,3 @@
-
/*
* Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
*
@@ -27,14 +26,23 @@
#include "config.h"
#include "JIT.h"
+// This probably does not belong here; adding here for now as a quick Windows build fix.
+#if ENABLE(ASSEMBLER) && PLATFORM(X86) && !PLATFORM(MAC)
+#include "MacroAssembler.h"
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
#if ENABLE(JIT)
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
+#include "JITStubs.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
@@ -46,19 +54,22 @@ using namespace std;
namespace JSC {
-void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
- returnAddress.relinkNearCallerToTrampoline(newCalleeFunction);
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}
-void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
- returnAddress.relinkCallerToTrampoline(newCalleeFunction);
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}
-void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, FunctionPtr newCalleeFunction)
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
- returnAddress.relinkCallerToFunction(newCalleeFunction);
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
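
A note on the new signatures above: each helper now takes the owning CodeBlock so that a RepatchBuffer can be constructed around it before any JIT code is touched. A caller-side sketch, using only the calls visible in this hunk (redirectSlowCase is a hypothetical name introduced for illustration):

    // Sketch of the new convention for patching a call site identified by
    // its return address; the types and member functions are those shown
    // in the hunk above.
    static void redirectSlowCase(JSC::CodeBlock* codeBlock,
                                 JSC::ReturnAddressPtr returnAddress,
                                 JSC::FunctionPtr newStub)
    {
        // The RepatchBuffer scopes writability of the CodeBlock's JIT code
        // (see the ExecutableAllocator changes earlier in this diff).
        JSC::RepatchBuffer repatchBuffer(codeBlock);
        repatchBuffer.relinkCallerToFunction(returnAddress, newStub);
    }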
@@ -68,51 +79,68 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
, m_labels(codeBlock ? codeBlock->instructions().size() : 0)
, m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
, m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
+ , m_bytecodeIndex((unsigned)-1)
+#if USE(JSVALUE32_64)
+ , m_jumpTargetIndex(0)
+ , m_mappedBytecodeIndex((unsigned)-1)
+ , m_mappedVirtualRegisterIndex((unsigned)-1)
+ , m_mappedTag((RegisterID)-1)
+ , m_mappedPayload((RegisterID)-1)
+#else
, m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
, m_jumpTargetsPosition(0)
+#endif
{
}
-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+#if USE(JSVALUE32_64)
+void JIT::emitTimeoutCheck()
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(src1, regT0, src2, regT1);
-
- // Jump to a slow case if either operand is a number, or if both are JSCell*s.
- move(regT0, regT2);
- orPtr(regT1, regT2);
- addSlowCase(emitJumpIfJSCell(regT2));
- addSlowCase(emitJumpIfImmediateNumber(regT2));
-
- if (type == OpStrictEq)
- set32(Equal, regT1, regT0, regT0);
- else
- set32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
-
- emitPutVirtualRegister(dst);
+ Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
+ JITStubCall stubCall(this, cti_timeout_check);
+ stubCall.addArgument(regT1, regT0); // save last result registers.
+ stubCall.call(timeoutCheckRegister);
+ stubCall.getArgument(0, regT1, regT0); // reload last result registers.
+ skipTimeout.link(this);
}
-
+#else
void JIT::emitTimeoutCheck()
{
Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
- JITStubCall(this, JITStubs::cti_timeout_check).call(timeoutCheckRegister);
+ JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
skipTimeout.link(this);
killLastResultRegister();
}
-
+#endif
#define NEXT_OPCODE(name) \
m_bytecodeIndex += OPCODE_LENGTH(name); \
break;
+#if USE(JSVALUE32_64)
+#define DEFINE_BINARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.addArgument(currentInstruction[3].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_UNARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#else // USE(JSVALUE32_64)
+
#define DEFINE_BINARY_OP(name) \
case name: { \
- JITStubCall stubCall(this, JITStubs::cti_##name); \
+ JITStubCall stubCall(this, cti_##name); \
stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
stubCall.call(currentInstruction[1].u.operand); \
@@ -121,11 +149,12 @@ void JIT::emitTimeoutCheck()
#define DEFINE_UNARY_OP(name) \
case name: { \
- JITStubCall stubCall(this, JITStubs::cti_##name); \
+ JITStubCall stubCall(this, cti_##name); \
stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
stubCall.call(currentInstruction[1].u.operand); \
NEXT_OPCODE(name); \
}
+#endif // USE(JSVALUE32_64)
#define DEFINE_OP(name) \
case name: { \
@@ -157,14 +186,18 @@ void JIT::privateCompileMainPass()
sampleInstruction(currentInstruction);
#endif
+#if !USE(JSVALUE32_64)
if (m_labels[m_bytecodeIndex].isUsed())
killLastResultRegister();
-
+#endif
+
m_labels[m_bytecodeIndex] = label();
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_BINARY_OP(op_del_by_val)
+#if !USE(JSVALUE32_64)
DEFINE_BINARY_OP(op_div)
+#endif
DEFINE_BINARY_OP(op_in)
DEFINE_BINARY_OP(op_less)
DEFINE_BINARY_OP(op_lesseq)
@@ -176,7 +209,9 @@ void JIT::privateCompileMainPass()
DEFINE_UNARY_OP(op_is_object)
DEFINE_UNARY_OP(op_is_string)
DEFINE_UNARY_OP(op_is_undefined)
+#if !USE(JSVALUE32_64)
DEFINE_UNARY_OP(op_negate)
+#endif
DEFINE_UNARY_OP(op_typeof)
DEFINE_OP(op_add)
@@ -195,6 +230,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_create_arguments)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
+#if USE(JSVALUE32_64)
+ DEFINE_OP(op_div)
+#endif
DEFINE_OP(op_end)
DEFINE_OP(op_enter)
DEFINE_OP(op_enter_with_activation)
@@ -225,6 +263,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_mod)
DEFINE_OP(op_mov)
DEFINE_OP(op_mul)
+#if USE(JSVALUE32_64)
+ DEFINE_OP(op_negate)
+#endif
DEFINE_OP(op_neq)
DEFINE_OP(op_neq_null)
DEFINE_OP(op_new_array)
@@ -254,7 +295,6 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_put_setter)
DEFINE_OP(op_resolve)
DEFINE_OP(op_resolve_base)
- DEFINE_OP(op_resolve_func)
DEFINE_OP(op_resolve_global)
DEFINE_OP(op_resolve_skip)
DEFINE_OP(op_resolve_with_base)
@@ -272,7 +312,6 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_throw)
DEFINE_OP(op_to_jsnumber)
DEFINE_OP(op_to_primitive)
- DEFINE_OP(op_unexpected_load)
case op_get_array_length:
case op_get_by_id_chain:
@@ -312,11 +351,15 @@ void JIT::privateCompileSlowCases()
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
m_propertyAccessInstructionIndex = 0;
+#if USE(JSVALUE32_64)
+ m_globalResolveInfoIndex = 0;
+#endif
m_callLinkInfoIndex = 0;
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
- // FIXME: enable peephole optimizations for slow cases when applicable
+#if !USE(JSVALUE32_64)
killLastResultRegister();
+#endif
m_bytecodeIndex = iter->to;
#ifndef NDEBUG
@@ -336,6 +379,9 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_construct)
DEFINE_SLOWCASE_OP(op_construct_verify)
DEFINE_SLOWCASE_OP(op_convert_this)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_div)
+#endif
DEFINE_SLOWCASE_OP(op_eq)
DEFINE_SLOWCASE_OP(op_get_by_id)
DEFINE_SLOWCASE_OP(op_get_by_val)
@@ -348,9 +394,12 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
DEFINE_SLOWCASE_OP(op_loop_if_true)
DEFINE_SLOWCASE_OP(op_lshift)
+ DEFINE_SLOWCASE_OP(op_method_check)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
- DEFINE_SLOWCASE_OP(op_method_check)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_negate)
+#endif
DEFINE_SLOWCASE_OP(op_neq)
DEFINE_SLOWCASE_OP(op_not)
DEFINE_SLOWCASE_OP(op_nstricteq)
@@ -360,6 +409,9 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_pre_inc)
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_resolve_global)
+#endif
DEFINE_SLOWCASE_OP(op_rshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
@@ -394,7 +446,7 @@ void JIT::privateCompile()
#endif
// Could use a pop_m, but would need to offset the following instruction if so.
- preverveReturnAddressAfterCall(regT2);
+ preserveReturnAddressAfterCall(regT2);
emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
Jump slowRegisterFileCheck;
@@ -403,10 +455,10 @@ void JIT::privateCompile()
// In the case of a fast linked call, we do not set this up in the caller.
emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
- peek(regT0, FIELD_OFFSET(JITStackFrame, registerFile) / sizeof (void*));
+ peek(regT0, OBJECT_OFFSETOF(JITStackFrame, registerFile) / sizeof (void*));
addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
- slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, FIELD_OFFSET(RegisterFile, m_end)));
+ slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, OBJECT_OFFSETOF(RegisterFile, m_end)));
afterRegisterFileCheck = label();
}
@@ -417,7 +469,7 @@ void JIT::privateCompile()
if (m_codeBlock->codeType() == FunctionCode) {
slowRegisterFileCheck.link(this);
m_bytecodeIndex = 0;
- JITStubCall(this, JITStubs::cti_register_file_check).call();
+ JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
@@ -426,7 +478,7 @@ void JIT::privateCompile()
ASSERT(m_jmpTable.isEmpty());
- PatchBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
@@ -474,7 +526,7 @@ void JIT::privateCompile()
// Link absolute addresses for jsr
for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
- patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).addressForJSR());
+ patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
@@ -486,10 +538,10 @@ void JIT::privateCompile()
#if ENABLE(JIT_OPTIMIZE_CALL)
for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+ info.ownerCodeBlock = m_codeBlock;
info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
- info.coldPathOther = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].coldPathOther);
}
#endif
unsigned methodCallCount = m_methodCallCompilationInfo.size();
@@ -503,436 +555,58 @@ void JIT::privateCompile()
m_codeBlock->setJITCode(patchBuffer.finalizeCode());
}
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
-{
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (1) The first function provides fast property access for array length
- Label arrayLengthBegin = align();
-
- // Check eax is an array
- Jump array_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump array_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT0);
- load32(Address(regT0, FIELD_OFFSET(ArrayStorage, m_length)), regT0);
-
- Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-
- // (2) The second function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // Check eax is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
-
- // Checks out okay! - get the length from the Ustring.
- loadPtr(Address(regT0, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), regT0);
- load32(Address(regT0, FIELD_OFFSET(UString::Rep, len)), regT0);
-
- Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-#endif
-
- // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
-
- Label virtualCallPreLinkBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0);
- // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
- loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
- Jump isNativeFunc1 = branchTestPtr(NonZero, regT0);
- preverveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction1 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock1.link(this);
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay1 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
- preverveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck1 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay1.link(this);
- isNativeFunc1.link(this);
-
- compileOpCallInitializeCallFrame();
-
- preverveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- restoreArgumentReference();
- Call callDontLazyLinkCall = call();
- emitGetJITStubArg(1, regT2);
- restoreReturnAddressBeforeReturn(regT3);
-
- jump(regT0);
-
- Label virtualCallLinkBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
- // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
- loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
- Jump isNativeFunc2 = branchTestPtr(NonZero, regT0);
- preverveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction2 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock2.link(this);
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
- preverveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck2 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay2.link(this);
- isNativeFunc2.link(this);
-
- compileOpCallInitializeCallFrame();
-
- preverveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- restoreReturnAddressBeforeReturn(regT3);
-
- jump(regT0);
-
- Label virtualCallBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
- // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
- loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
- Jump isNativeFunc3 = branchTestPtr(NonZero, regT0);
- preverveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction3 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3); // reload the function body nody, so we can reload the code pointer.
- hasCodeBlock3.link(this);
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
- preverveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck3 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3); // reload the function body nody, so we can reload the code pointer.
- arityCheckOkay3.link(this);
- // load ctiCode from the new codeBlock.
- loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
- isNativeFunc3.link(this);
-
- compileOpCallInitializeCallFrame();
- jump(regT0);
-
-
- Label nativeCallThunk = align();
- preverveReturnAddressAfterCall(regT0);
- emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
-
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-
-#if PLATFORM(X86_64)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);
-
- // Allocate stack space for our arglist
- subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
- COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned);
-
- // Set up arguments
- subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
-
- // Push argcount
- storePtr(X86::ecx, Address(stackPointerRegister, FIELD_OFFSET(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in edx
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
- mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
- subPtr(X86::ecx, X86::edx);
-
- // push pointer to arguments
- storePtr(X86::edx, Address(stackPointerRegister, FIELD_OFFSET(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister
- move(stackPointerRegister, X86::ecx);
-
- // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
- loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);
-
- move(callFrameRegister, X86::edi);
-
- call(Address(X86::esi, FIELD_OFFSET(JSFunction, m_data)));
-
- addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-#elif PLATFORM(X86)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
- /* We have two structs that we use to describe the stackframe we set up for our
- * call to native code. NativeCallFrameStructure describes the how we set up the stack
- * in advance of the call. NativeFunctionCalleeSignature describes the callframe
- * as the native code expects it. We do this as we are using the fastcall calling
- * convention which results in the callee popping its arguments off the stack, but
- * not the rest of the callframe so we need a nice way to ensure we increment the
- * stack pointer by the right amount after the call.
- */
-#if COMPILER(MSVC) || PLATFORM(LINUX)
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in EDX
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- JSValue result;
- };
- struct NativeFunctionCalleeSignature {
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- };
-#else
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in ECX
- // JSObject* callee; // passed in EDX
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- };
- struct NativeFunctionCalleeSignature {
- JSValue thisValue;
- ArgList* argPointer;
- };
-#endif
- const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
- // Allocate system stack frame
- subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
-
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
-
- // push argcount
- storePtr(regT0, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, args) + FIELD_OFFSET(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in regT1
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
- storePtr(regT1, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, args) + FIELD_OFFSET(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(FIELD_OFFSET(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
- storePtr(regT0, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, argPointer)));
-
- // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
- loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
- storePtr(regT1, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, thisValue)));
-
-#if COMPILER(MSVC) || PLATFORM(LINUX)
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(FIELD_OFFSET(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
-
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
- storePtr(X86::eax, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, callee)));
-
- // Plant callframe
- move(callFrameRegister, X86::edx);
-
- call(Address(X86::eax, FIELD_OFFSET(JSFunction, m_data)));
-
- // JSValue is a non-POD type
- loadPtr(Address(X86::eax), X86::eax);
-#else
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);
-
- // Plant callframe
- move(callFrameRegister, X86::ecx);
- call(Address(X86::edx, FIELD_OFFSET(JSFunction, m_data)));
-#endif
-
- // We've put a few temporaries on the stack in addition to the actual arguments
- // so pull them off now
- addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- breakpoint();
-#endif
-
- // Check for an exception
- loadPtr(&(globalData->exception), regT2);
- Jump exceptionHandler = branchTestPtr(NonZero, regT2);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-
- // Handle an exception
- exceptionHandler.link(this);
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1);
- Call array_failureCases2Call = makeTailRecursiveCall(array_failureCases2);
- Call array_failureCases3Call = makeTailRecursiveCall(array_failureCases3);
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
-
- // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- PatchBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(array_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(array_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(array_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(string_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
-#endif
- patchBuffer.link(callArityCheck1, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callArityCheck2, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callArityCheck3, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction1, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callJSFunction2, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callJSFunction3, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callDontLazyLinkCall, FunctionPtr(JITStubs::cti_vm_dontLazyLinkCall));
- patchBuffer.link(callLazyLinkCall, FunctionPtr(JITStubs::cti_vm_lazyLinkCall));
-
- CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
-
- *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
- *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
- *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- *ctiArrayLengthTrampoline = trampolineAt(finalCode, arrayLengthBegin);
- *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
-#else
- UNUSED_PARAM(ctiArrayLengthTrampoline);
- UNUSED_PARAM(ctiStringLengthTrampoline);
-#endif
-}
-
+#if !USE(JSVALUE32_64)
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
{
- loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), dst);
- loadPtr(Address(dst, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), dst);
+ loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
+ loadPtr(Address(dst, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), dst);
loadPtr(Address(dst, index * sizeof(Register)), dst);
}
void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
{
- loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), variableObject);
- loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), variableObject);
+ loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), variableObject);
+ loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
storePtr(src, Address(variableObject, index * sizeof(Register)));
}
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
{
// When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
// (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
// match). Reset the check so it no longer matches.
- callLinkInfo->hotPathBegin.repatch(JSValue::encode(JSValue()));
+ RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get());
+#if USE(JSVALUE32_64)
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
+#else
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
+#endif
}
-void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount)
+void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
+ ASSERT(calleeCodeBlock);
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
// Currently we only link calls with the exact number of arguments.
// If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
- if (!calleeCodeBlock || callerArgCount == calleeCodeBlock->m_numParameters) {
+ if (callerArgCount == calleeCodeBlock->m_numParameters || calleeCodeBlock->codeType() == NativeCode) {
ASSERT(!callLinkInfo->isLinked());
if (calleeCodeBlock)
calleeCodeBlock->addCaller(callLinkInfo);
- callLinkInfo->hotPathBegin.repatch(callee);
- callLinkInfo->hotPathOther.relink(code.addressForCall());
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code.addressForCall());
}
- // patch the instruction that jumps out to the cold path, so that we only try to link once.
- callLinkInfo->hotPathBegin.jumpAtOffset(patchOffsetOpCallCompareToJump).relink(callLinkInfo->coldPathOther);
+ // patch the call so we do not continue to try to link.
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
}
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
} // namespace JSC
#endif // ENABLE(JIT)
-
-// This probably does not belong here; adding here for now as a quick Windows build fix.
-#if ENABLE(ASSEMBLER)
-
-#if PLATFORM(X86) && !PLATFORM(MAC)
-JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
-#endif
-
-#endif
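
To summarize the call-linking changes in this file: linkCall now receives the caller's CodeBlock and the JSGlobalData, patches the inline cache through a RepatchBuffer, and relinks the slow-path call to the shared virtual-call trampoline so linking is attempted only once. A condensed sketch assembled from the calls in the hunks above (the arity check and the unlink path are omitted):

    // Sketch of the monomorphic call-link fast path after this change.
    using namespace JSC;

    void linkMonomorphicCall(JSFunction* callee, CodeBlock* callerCodeBlock,
                             JITCode& code, CallLinkInfo* info,
                             JSGlobalData* globalData)
    {
        RepatchBuffer repatchBuffer(callerCodeBlock);

        // Cache the callee pointer in the hot path's inline comparison and
        // aim the near call directly at the callee's entry point.
        repatchBuffer.repatch(info->hotPathBegin, callee);
        repatchBuffer.relink(info->hotPathOther, code.addressForCall());

        // Stop re-entering the lazy linker: the slow path now calls the
        // generic virtual-call trampoline.
        repatchBuffer.relink(info->callReturnLocation,
                             globalData->jitStubs.ctiVirtualCall());
    }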
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index 81f804a..93f47d9 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -59,14 +59,14 @@ namespace JSC {
class Register;
class RegisterFile;
class ScopeChainNode;
- class SimpleJumpTable;
- class StringJumpTable;
class StructureChain;
struct CallLinkInfo;
struct Instruction;
struct OperandTypes;
struct PolymorphicAccessStructureList;
+ struct SimpleJumpTable;
+ struct StringJumpTable;
struct StructureStubInfo;
struct CallRecord {
@@ -153,7 +153,6 @@ namespace JSC {
MacroAssembler::DataLabelPtr hotPathBegin;
MacroAssembler::Call hotPathOther;
MacroAssembler::Call callReturnLocation;
- MacroAssembler::Label coldPathOther;
};
struct MethodCallCompilationInfo {
@@ -167,13 +166,12 @@ namespace JSC {
};
// Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions.
- void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction);
- void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction);
- void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, FunctionPtr newCalleeFunction);
+ void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
class JIT : private MacroAssembler {
friend class JITStubCall;
- friend class CallEvalJITStub;
using MacroAssembler::Jump;
using MacroAssembler::JumpList;
@@ -228,7 +226,7 @@ namespace JSC {
static const FPRegisterID fpRegT0 = X86::xmm0;
static const FPRegisterID fpRegT1 = X86::xmm1;
static const FPRegisterID fpRegT2 = X86::xmm2;
-#elif PLATFORM(ARM_V7)
+#elif PLATFORM_ARM_ARCH(7)
static const RegisterID returnValueRegister = ARM::r0;
static const RegisterID cachedResultRegister = ARM::r0;
static const RegisterID firstArgumentRegister = ARM::r0;
@@ -244,6 +242,31 @@ namespace JSC {
static const FPRegisterID fpRegT0 = ARM::d0;
static const FPRegisterID fpRegT1 = ARM::d1;
static const FPRegisterID fpRegT2 = ARM::d2;
+#elif PLATFORM(ARM)
+ static const RegisterID returnValueRegister = ARM::r0;
+ static const RegisterID cachedResultRegister = ARM::r0;
+ static const RegisterID firstArgumentRegister = ARM::r0;
+
+ static const RegisterID timeoutCheckRegister = ARM::r5;
+ static const RegisterID callFrameRegister = ARM::r4;
+ static const RegisterID ctiReturnRegister = ARM::r6;
+
+ static const RegisterID regT0 = ARM::r0;
+ static const RegisterID regT1 = ARM::r1;
+ static const RegisterID regT2 = ARM::r2;
+ // Callee preserved
+ static const RegisterID regT3 = ARM::r7;
+
+ static const RegisterID regS0 = ARM::S0;
+ // Callee preserved
+ static const RegisterID regS1 = ARM::S1;
+
+ static const RegisterID regStackPtr = ARM::sp;
+ static const RegisterID regLink = ARM::lr;
+
+ static const FPRegisterID fpRegT0 = ARM::d0;
+ static const FPRegisterID fpRegT1 = ARM::d1;
+ static const FPRegisterID fpRegT2 = ARM::d2;
#else
#error "JIT not supported on this platform."
#endif
@@ -253,81 +276,6 @@ namespace JSC {
// will compress the displacement, and we may not be able to fit a patched offset.
static const int patchGetByIdDefaultOffset = 256;
-#if PLATFORM(X86_64)
- // These architecture specific value are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 10;
- static const int patchOffsetPutByIdExternalLoad = 20;
- static const int patchLengthPutByIdExternalLoad = 4;
- static const int patchOffsetPutByIdPropertyMapOffset = 31;
- // These architecture specific value are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 10;
- static const int patchOffsetGetByIdBranchToSlowCase = 20;
- static const int patchOffsetGetByIdExternalLoad = 20;
- static const int patchLengthGetByIdExternalLoad = 4;
- static const int patchOffsetGetByIdPropertyMapOffset = 31;
- static const int patchOffsetGetByIdPutResult = 31;
-#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 66;
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 44;
-#endif
- static const int patchOffsetOpCallCompareToJump = 9;
-
- static const int patchOffsetMethodCheckProtoObj = 20;
- static const int patchOffsetMethodCheckProtoStruct = 30;
- static const int patchOffsetMethodCheckPutFunction = 50;
-#elif PLATFORM(X86)
- // These architecture specific value are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 7;
- static const int patchOffsetPutByIdExternalLoad = 13;
- static const int patchLengthPutByIdExternalLoad = 3;
- static const int patchOffsetPutByIdPropertyMapOffset = 22;
- // These architecture specific value are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 7;
- static const int patchOffsetGetByIdBranchToSlowCase = 13;
- static const int patchOffsetGetByIdExternalLoad = 13;
- static const int patchLengthGetByIdExternalLoad = 3;
- static const int patchOffsetGetByIdPropertyMapOffset = 22;
- static const int patchOffsetGetByIdPutResult = 22;
-#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 31;
-#elif ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 33;
-#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 21;
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 23;
-#endif
- static const int patchOffsetOpCallCompareToJump = 6;
-
- static const int patchOffsetMethodCheckProtoObj = 11;
- static const int patchOffsetMethodCheckProtoStruct = 18;
- static const int patchOffsetMethodCheckPutFunction = 29;
-#elif PLATFORM(ARM_V7)
- // These architecture specific value are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 10;
- static const int patchOffsetPutByIdExternalLoad = 20;
- static const int patchLengthPutByIdExternalLoad = 12;
- static const int patchOffsetPutByIdPropertyMapOffset = 40;
- // These architecture specific value are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 10;
- static const int patchOffsetGetByIdBranchToSlowCase = 20;
- static const int patchOffsetGetByIdExternalLoad = 20;
- static const int patchLengthGetByIdExternalLoad = 12;
- static const int patchOffsetGetByIdPropertyMapOffset = 40;
- static const int patchOffsetGetByIdPutResult = 44;
-#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 28;
-#endif
- static const int patchOffsetOpCallCompareToJump = 10;
-
- static const int patchOffsetMethodCheckProtoObj = 18;
- static const int patchOffsetMethodCheckProtoStruct = 28;
- static const int patchOffsetMethodCheckPutFunction = 46;
-#endif
-
public:
static void compile(JSGlobalData* globalData, CodeBlock* codeBlock)
{
@@ -335,7 +283,7 @@ namespace JSC {
jit.privateCompile();
}
- static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
+ static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
@@ -357,35 +305,35 @@ namespace JSC {
jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
}
- static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress)
+ static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
}
- static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
+ static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
}
- static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
+ static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
JIT jit(globalData);
- jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
+ jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiStringLengthTrampoline, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
}
- static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
- static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
- static void patchMethodCallProto(MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*);
+ static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+ static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+ static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);
- static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ProcessorReturnAddress returnAddress)
+ static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
return jit.privateCompilePatchGetArrayLength(returnAddress);
}
- static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount);
+ static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount, JSGlobalData*);
static void unlinkCall(CallLinkInfo*);
private:
@@ -406,24 +354,21 @@ namespace JSC {
void privateCompileLinkPass();
void privateCompileSlowCases();
void privateCompile();
- void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
- void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ProcessorReturnAddress returnAddress);
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress);
- void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
- void privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress);
+ void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
+ void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
void addSlowCase(Jump);
+ void addSlowCase(JumpList);
void addJump(Jump, int);
void emitJumpSlowToHot(Jump, int);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck = false);
-#endif
void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
void compileOpCallVarargs(Instruction* instruction);
void compileOpCallInitializeCallFrame();
@@ -432,164 +377,374 @@ namespace JSC {
void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
void compileOpConstructSetupArgs(Instruction*);
+
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
+#if USE(JSVALUE32_64)
+ Address tagFor(unsigned index, RegisterID base = callFrameRegister);
+ Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
+ Address addressFor(unsigned index, RegisterID base = callFrameRegister);
+
+ bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
+ bool isOperandConstantImmediateDouble(unsigned src);
+
+ void emitLoadTag(unsigned index, RegisterID tag);
+ void emitLoadPayload(unsigned index, RegisterID payload);
+
+ void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
+ void emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+ void emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2);
+ void emitLoadDouble(unsigned index, FPRegisterID value);
+ void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
+
+ void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+ void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
+ void emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32 = false);
+ void emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32 = false);
+ void emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell = false);
+ void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
+ void emitStoreDouble(unsigned index, FPRegisterID value);
+
+ bool isLabeled(unsigned bytecodeIndex);
+ void map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
+ void unmap(RegisterID);
+ void unmap();
+ bool isMapped(unsigned virtualRegisterIndex);
+ bool getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload);
+ bool getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag);
+
+ void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex);
+ void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag);
+ void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, unsigned virtualRegisterIndex);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ void compileGetByIdHotPath();
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+#endif
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
+ void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset);
+
+ // Arithmetic opcode helpers
+ void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
+ void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
+ void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
+
+#if PLATFORM(X86)
+ // These architecture specific value are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdExternalLoad = 13;
+ static const int patchLengthPutByIdExternalLoad = 3;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 22;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
+ // These architecture specific value are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdExternalLoad = 13;
+ static const int patchLengthGetByIdExternalLoad = 3;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 22;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
+ static const int patchOffsetGetByIdPutResult = 28;
+#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 35;
+#elif ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 37;
+#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 25;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 27;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
+
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#else
+#error "JSVALUE32_64 not supported on this platform."
+#endif
+
+#else // USE(JSVALUE32_64)
+ void emitGetVirtualRegister(int src, RegisterID dst);
+ void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
+ void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
+
+ int32_t getConstantOperandImmediateInt(unsigned src);
+
+ void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
+ void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
+
+ void killLastResultRegister();
+
+ Jump emitJumpIfJSCell(RegisterID);
+ Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfJSCell(RegisterID);
+ Jump emitJumpIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
+#if USE(JSVALUE64)
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
+#else
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfImmediateInteger(reg);
+ }
+
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfNotImmediateInteger(reg);
+ }
+#endif
+ JIT::Jump emitJumpIfImmediateInteger(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+
+#if !USE(JSVALUE64)
+ void emitFastArithDeTagImmediate(RegisterID);
+ Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
+#endif
+ void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
+ void emitFastArithImmToInt(RegisterID);
+ void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
+
+ void emitTagAsBoolImmediate(RegisterID reg);
+ void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+#endif
void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
- // Arithmetic Ops
+#if PLATFORM(X86_64)
+        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchLengthPutByIdExternalLoad = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset = 31;
+        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchLengthGetByIdExternalLoad = 4;
+ static const int patchOffsetGetByIdPropertyMapOffset = 31;
+ static const int patchOffsetGetByIdPutResult = 31;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 63;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 41;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 9;
- void emit_op_add(Instruction*);
- void emit_op_sub(Instruction*);
- void emit_op_mul(Instruction*);
- void emit_op_mod(Instruction*);
- void emit_op_bitand(Instruction*);
- void emit_op_lshift(Instruction*);
- void emit_op_rshift(Instruction*);
- void emit_op_jnless(Instruction*);
- void emit_op_jnlesseq(Instruction*);
- void emit_op_pre_inc(Instruction*);
- void emit_op_pre_dec(Instruction*);
- void emit_op_post_inc(Instruction*);
- void emit_op_post_dec(Instruction*);
- void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ static const int patchOffsetMethodCheckProtoObj = 20;
+ static const int patchOffsetMethodCheckProtoStruct = 30;
+ static const int patchOffsetMethodCheckPutFunction = 50;
+#elif PLATFORM(X86)
+        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdExternalLoad = 13;
+ static const int patchLengthPutByIdExternalLoad = 3;
+ static const int patchOffsetPutByIdPropertyMapOffset = 22;
+        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdExternalLoad = 13;
+ static const int patchLengthGetByIdExternalLoad = 3;
+ static const int patchOffsetGetByIdPropertyMapOffset = 22;
+ static const int patchOffsetGetByIdPutResult = 22;
+#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 31;
+#elif ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 33;
+#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 21;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 23;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
- void emit_op_get_by_val(Instruction*);
- void emit_op_put_by_val(Instruction*);
- void emit_op_put_by_index(Instruction*);
- void emit_op_put_getter(Instruction*);
- void emit_op_put_setter(Instruction*);
- void emit_op_del_by_id(Instruction*);
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#elif PLATFORM_ARM_ARCH(7)
+        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchLengthPutByIdExternalLoad = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset = 40;
+        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchLengthGetByIdExternalLoad = 12;
+ static const int patchOffsetGetByIdPropertyMapOffset = 40;
+ static const int patchOffsetGetByIdPutResult = 44;
+#if ENABLE(OPCODE_SAMPLING)
+        static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 10;
- void emit_op_mov(Instruction*);
- void emit_op_end(Instruction*);
- void emit_op_jmp(Instruction*);
- void emit_op_loop(Instruction*);
- void emit_op_loop_if_less(Instruction*);
- void emit_op_loop_if_lesseq(Instruction*);
- void emit_op_new_object(Instruction*);
- void emit_op_put_by_id(Instruction*);
- void emit_op_get_by_id(Instruction*);
- void emit_op_instanceof(Instruction*);
- void emit_op_new_func(Instruction*);
+ static const int patchOffsetMethodCheckProtoObj = 18;
+ static const int patchOffsetMethodCheckProtoStruct = 28;
+ static const int patchOffsetMethodCheckPutFunction = 46;
+#endif
+#endif // USE(JSVALUE32_64)
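Every emit_op_* / emitSlow_op_* pair declared below is invoked from the JIT's main and slow-path compile loops, which switch on the opcode of the current bytecode instruction. Roughly (a sketch of the dispatch pattern, abbreviated; the real macro in JIT.cpp also handles bookkeeping such as sampling):

    // Main-pass dispatch, one case per opcode.
    #define DEFINE_OP(name) \
        case name: \
            emit_##name(currentInstruction); \
            break;

    switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        // ... one DEFINE_OP per opcode declared below ...
    }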
+
+ void emit_op_add(Instruction*);
+ void emit_op_bitand(Instruction*);
+ void emit_op_bitnot(Instruction*);
+ void emit_op_bitor(Instruction*);
+ void emit_op_bitxor(Instruction*);
void emit_op_call(Instruction*);
void emit_op_call_eval(Instruction*);
- void emit_op_method_check(Instruction*);
- void emit_op_load_varargs(Instruction*);
void emit_op_call_varargs(Instruction*);
+ void emit_op_catch(Instruction*);
void emit_op_construct(Instruction*);
+ void emit_op_construct_verify(Instruction*);
+ void emit_op_convert_this(Instruction*);
+ void emit_op_create_arguments(Instruction*);
+ void emit_op_debug(Instruction*);
+ void emit_op_del_by_id(Instruction*);
+ void emit_op_div(Instruction*);
+ void emit_op_end(Instruction*);
+ void emit_op_enter(Instruction*);
+ void emit_op_enter_with_activation(Instruction*);
+ void emit_op_eq(Instruction*);
+ void emit_op_eq_null(Instruction*);
+ void emit_op_get_by_id(Instruction*);
+ void emit_op_get_by_val(Instruction*);
void emit_op_get_global_var(Instruction*);
- void emit_op_put_global_var(Instruction*);
void emit_op_get_scoped_var(Instruction*);
- void emit_op_put_scoped_var(Instruction*);
- void emit_op_tear_off_activation(Instruction*);
- void emit_op_tear_off_arguments(Instruction*);
- void emit_op_ret(Instruction*);
- void emit_op_new_array(Instruction*);
- void emit_op_resolve(Instruction*);
- void emit_op_construct_verify(Instruction*);
- void emit_op_to_primitive(Instruction*);
- void emit_op_strcat(Instruction*);
- void emit_op_resolve_func(Instruction*);
- void emit_op_loop_if_true(Instruction*);
- void emit_op_resolve_base(Instruction*);
- void emit_op_resolve_skip(Instruction*);
- void emit_op_resolve_global(Instruction*);
- void emit_op_not(Instruction*);
- void emit_op_jfalse(Instruction*);
+ void emit_op_init_arguments(Instruction*);
+ void emit_op_instanceof(Instruction*);
void emit_op_jeq_null(Instruction*);
+ void emit_op_jfalse(Instruction*);
+ void emit_op_jmp(Instruction*);
+ void emit_op_jmp_scopes(Instruction*);
void emit_op_jneq_null(Instruction*);
void emit_op_jneq_ptr(Instruction*);
- void emit_op_unexpected_load(Instruction*);
+ void emit_op_jnless(Instruction*);
+ void emit_op_jnlesseq(Instruction*);
void emit_op_jsr(Instruction*);
- void emit_op_sret(Instruction*);
- void emit_op_eq(Instruction*);
- void emit_op_bitnot(Instruction*);
- void emit_op_resolve_with_base(Instruction*);
- void emit_op_new_func_exp(Instruction*);
void emit_op_jtrue(Instruction*);
+ void emit_op_load_varargs(Instruction*);
+ void emit_op_loop(Instruction*);
+ void emit_op_loop_if_less(Instruction*);
+ void emit_op_loop_if_lesseq(Instruction*);
+ void emit_op_loop_if_true(Instruction*);
+ void emit_op_lshift(Instruction*);
+ void emit_op_method_check(Instruction*);
+ void emit_op_mod(Instruction*);
+ void emit_op_mov(Instruction*);
+ void emit_op_mul(Instruction*);
+ void emit_op_negate(Instruction*);
void emit_op_neq(Instruction*);
- void emit_op_bitxor(Instruction*);
+ void emit_op_neq_null(Instruction*);
+ void emit_op_new_array(Instruction*);
+ void emit_op_new_error(Instruction*);
+ void emit_op_new_func(Instruction*);
+ void emit_op_new_func_exp(Instruction*);
+ void emit_op_new_object(Instruction*);
void emit_op_new_regexp(Instruction*);
- void emit_op_bitor(Instruction*);
- void emit_op_throw(Instruction*);
void emit_op_next_pname(Instruction*);
- void emit_op_push_scope(Instruction*);
- void emit_op_pop_scope(Instruction*);
- void emit_op_stricteq(Instruction*);
+ void emit_op_not(Instruction*);
void emit_op_nstricteq(Instruction*);
- void emit_op_to_jsnumber(Instruction*);
+ void emit_op_pop_scope(Instruction*);
+ void emit_op_post_dec(Instruction*);
+ void emit_op_post_inc(Instruction*);
+ void emit_op_pre_dec(Instruction*);
+ void emit_op_pre_inc(Instruction*);
+ void emit_op_profile_did_call(Instruction*);
+ void emit_op_profile_will_call(Instruction*);
void emit_op_push_new_scope(Instruction*);
- void emit_op_catch(Instruction*);
- void emit_op_jmp_scopes(Instruction*);
- void emit_op_switch_imm(Instruction*);
+ void emit_op_push_scope(Instruction*);
+ void emit_op_put_by_id(Instruction*);
+ void emit_op_put_by_index(Instruction*);
+ void emit_op_put_by_val(Instruction*);
+ void emit_op_put_getter(Instruction*);
+ void emit_op_put_global_var(Instruction*);
+ void emit_op_put_scoped_var(Instruction*);
+ void emit_op_put_setter(Instruction*);
+ void emit_op_resolve(Instruction*);
+ void emit_op_resolve_base(Instruction*);
+ void emit_op_resolve_global(Instruction*);
+ void emit_op_resolve_skip(Instruction*);
+ void emit_op_resolve_with_base(Instruction*);
+ void emit_op_ret(Instruction*);
+ void emit_op_rshift(Instruction*);
+ void emit_op_sret(Instruction*);
+ void emit_op_strcat(Instruction*);
+ void emit_op_stricteq(Instruction*);
+ void emit_op_sub(Instruction*);
void emit_op_switch_char(Instruction*);
+ void emit_op_switch_imm(Instruction*);
void emit_op_switch_string(Instruction*);
- void emit_op_new_error(Instruction*);
- void emit_op_debug(Instruction*);
- void emit_op_eq_null(Instruction*);
- void emit_op_neq_null(Instruction*);
- void emit_op_enter(Instruction*);
- void emit_op_enter_with_activation(Instruction*);
- void emit_op_init_arguments(Instruction*);
- void emit_op_create_arguments(Instruction*);
- void emit_op_convert_this(Instruction*);
- void emit_op_profile_will_call(Instruction*);
- void emit_op_profile_did_call(Instruction*);
+ void emit_op_tear_off_activation(Instruction*);
+ void emit_op_tear_off_arguments(Instruction*);
+ void emit_op_throw(Instruction*);
+ void emit_op_to_jsnumber(Instruction*);
+ void emit_op_to_primitive(Instruction*);
+ void emit_op_unexpected_load(Instruction*);
- void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
-#if ENABLE(JIT_OPTIMIZE_ARITHMETIC)
- void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
- void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
-#endif
-
- void emitGetVirtualRegister(int src, RegisterID dst);
- void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
- void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
-
+ /* These functions are deprecated: Please use JITStubCall instead. */
void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
+#if USE(JSVALUE32_64)
+ void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2);
+#else
void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
+#endif
void emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber);
void emitPutJITStubArgConstant(void* value, unsigned argumentNumber);
void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
@@ -602,30 +757,8 @@ namespace JSC {
void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
JSValue getConstantOperand(unsigned src);
- int32_t getConstantOperandImmediateInt(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
- Jump emitJumpIfJSCell(RegisterID);
- Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
- void emitJumpSlowCaseIfJSCell(RegisterID);
- Jump emitJumpIfNotJSCell(RegisterID);
- void emitJumpSlowCaseIfNotJSCell(RegisterID);
- void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
-#if USE(ALTERNATE_JSIMMEDIATE)
- JIT::Jump emitJumpIfImmediateNumber(RegisterID);
- JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
-#else
- JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
- {
- return emitJumpIfImmediateInteger(reg);
- }
-
- JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
- {
- return emitJumpIfNotImmediateInteger(reg);
- }
-#endif
-
Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
return iter++->from;
@@ -637,43 +770,22 @@ namespace JSC {
}
void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);
- JIT::Jump emitJumpIfImmediateInteger(RegisterID);
- JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
- JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
- void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
- void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
-
Jump checkStructure(RegisterID reg, Structure* structure);
-#if !USE(ALTERNATE_JSIMMEDIATE)
- void emitFastArithDeTagImmediate(RegisterID);
- Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
-#endif
- void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
- void emitFastArithImmToInt(RegisterID);
- void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
-
- void emitTagAsBoolImmediate(RegisterID reg);
-
void restoreArgumentReference();
void restoreArgumentReferenceForTrampoline();
Call emitNakedCall(CodePtr function = CodePtr());
- void preverveReturnAddressAfterCall(RegisterID);
+
+ void preserveReturnAddressAfterCall(RegisterID);
void restoreReturnAddressBeforeReturn(RegisterID);
void restoreReturnAddressBeforeReturn(Address);
- void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
- void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
-
void emitTimeoutCheck();
#ifndef NDEBUG
void printBytecodeOperandTypes(unsigned src1, unsigned src2);
#endif
- void killLastResultRegister();
-
-
#if ENABLE(SAMPLING_FLAGS)
void setSamplingFlag(int32_t);
void clearSamplingFlag(int32_t);
@@ -709,15 +821,22 @@ namespace JSC {
Vector<SlowCaseEntry> m_slowCases;
Vector<SwitchRecord> m_switches;
- int m_lastResultBytecodeRegister;
- unsigned m_jumpTargetsPosition;
-
unsigned m_propertyAccessInstructionIndex;
unsigned m_globalResolveInfoIndex;
unsigned m_callLinkInfoIndex;
- } JIT_CLASS_ALIGNMENT;
-}
+#if USE(JSVALUE32_64)
+ unsigned m_jumpTargetIndex;
+ unsigned m_mappedBytecodeIndex;
+ unsigned m_mappedVirtualRegisterIndex;
+ RegisterID m_mappedTag;
+ RegisterID m_mappedPayload;
+#else
+ int m_lastResultBytecodeRegister;
+ unsigned m_jumpTargetsPosition;
+#endif
+ } JIT_CLASS_ALIGNMENT;
+} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index 86c01d9..ea343d8 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -41,11 +41,1095 @@
#include <stdio.h>
#endif
-
using namespace std;
namespace JSC {
+#if USE(JSVALUE32_64)
+
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branch32(Equal, regT0, Imm32(0)));
+
+ neg32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ xor32(Imm32(1 << 31), regT1);
+ store32(regT1, tagFor(dst));
+ if (dst != src)
+ store32(regT0, payloadFor(dst));
+
+ end.link(this);
+}
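Two details above are worth spelling out: integer 0 takes the slow case because negating it must produce -0, which has no int32 representation; and for doubles, the tag word of a JSVALUE32_64 cell holds the high 32 bits of the IEEE-754 value, so xor32(Imm32(1 << 31), regT1) flips the sign bit directly. A host-side model of that sign flip (illustrative only):

    #include <cstdint>
    #include <cstring>

    // Negate a double by toggling its sign bit, mirroring the xor32 on the
    // boxed value's high word.
    double negateDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        bits ^= 1ull << 63; // bit 31 of the high 32 bits
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }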
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // 0 check
+ linkSlowCase(iter); // double check
+
+ JITStubCall stubCall(this, cti_op_negate);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Int32 less.
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, regT2), target + 3);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+}
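The conditional linkSlowCase chains above must consume exactly the jumps the fast path registered, in registration order; which branches exist depends on which operand was a constant and on floating-point support. Schematically:

    // Every addSlowCase(...) in emit_op_jnless produces one pending jump;
    // emitSlow_op_jnless must call linkSlowCase(iter) once per jump, in the
    // same order, under the same conditions. A mismatch in count or order
    // would bind a jump to the wrong point in the stub call sequence.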
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+    // Int32 less-or-equal.
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT0, regT2), target + 3);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+    // Double less-or-equal.
+ emitBinaryDoubleOp(op_jnlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+}
+
+// LeftShift (<<)
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ lshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
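ECMA-262 requires the shift count to be taken modulo 32 (its low five bits), and x86 shift instructions mask the count register the same way, which is why the variable-count path above can feed regT2 straight into lshift32 on this port. A host-side model of the semantics (illustrative; real JS first applies ToInt32 to both operands):

    #include <cstdint>

    // JS '<<': only the low 5 bits of the count are significant.
    int32_t jsLeftShift(int32_t value, uint32_t count)
    {
        return value << (count & 0x1f); // e.g. (1 << 33) === 2 in JS
    }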
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_lshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>)
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ rshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_rshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitAnd (&)
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ and32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ and32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitOr (|)
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ or32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ or32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitXor (^)
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ xor32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ xor32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitxor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitNot (~)
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ not32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitnot);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+// PostInc (i++)
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+    if (dst == srcDst) // x = x++ is a no-op for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
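When dst == srcDst the function returns right after the tag check, since x = x++ leaves an int32 x unchanged; otherwise the pre-increment value is stored to dst before the increment, and the overflow branch becomes a second slow case, which the slow path below links only under the same dst != srcDst condition. A rough host-level model (illustrative; 'slow' stands for the stub fallback):

    #include <cstdint>
    #include <limits>

    // Model of the op_post_inc int32 fast path.
    int32_t postInc(int32_t& srcDst, bool& slow)
    {
        int32_t oldValue = srcDst;
        if (srcDst == std::numeric_limits<int32_t>::max()) {
            slow = true;     // overflow: the result must become a double
            return oldValue;
        }
        ++srcDst;
        return oldValue;     // dst receives the pre-increment value
    }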
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PostDec (i--)
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+    if (dst == srcDst) // x = x-- is a no-op for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PreInc (++i)
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// PreDec (--i)
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// Addition (+)
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchAdd32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ addDouble(fpRegT1, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+
+ end.link(this);
+}
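The overflow branch exists because int32 addition that leaves the 32-bit range must yield a double, never wrap. A host-side check equivalent to what branchAdd32(Overflow, ...) detects (illustrative):

    #include <cstdint>
    #include <limits>

    // When must the '+' fast path bail out?
    bool addOverflows(int32_t a, int32_t b)
    {
        int64_t wide = static_cast<int64_t>(a) + b;
        return wide < std::numeric_limits<int32_t>::min()
            || wide > std::numeric_limits<int32_t>::max();
    }
    // addOverflows(INT32_MAX, 1) is true: JS must produce 2147483648.0,
    // so the code above falls through to the double path instead of wrapping.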
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // non-sse case
+ return;
+ }
+
+ ResultType opType = op == op1 ? types.first() : types.second();
+ if (!opType.definitelyIsNumber())
+ linkSlowCase(iter); // double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Subtraction (-)
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchSub32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
+ linkSlowCase(iter); // int32 or double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_sub);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+{
+ JumpList end;
+
+ if (!notInt32Op1.empty()) {
+ // Double case 1: Op1 is not int32; Op2 is unknown.
+ notInt32Op1.link(this);
+
+ ASSERT(op1IsInRegisters);
+
+ // Verify Op1 is double.
+ if (!types.first().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ if (!op2IsInRegisters)
+ emitLoad(op2, regT3, regT2);
+
+ Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
+
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT2, fpRegT0);
+ Jump doTheMath = jump();
+
+ // Load Op2 as double into double register.
+ doubleOp2.link(this);
+ emitLoadDouble(op2, fpRegT0);
+
+ // Do the math.
+ doTheMath.link(this);
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op1, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op1, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op1, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_div:
+ emitLoadDouble(op1, fpRegT1);
+ divDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_jnless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT2), dst + 3);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT2), dst + 3);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ if (!notInt32Op2.empty())
+ end.append(jump());
+ }
+
+ if (!notInt32Op2.empty()) {
+ // Double case 2: Op1 is int32; Op2 is not int32.
+ notInt32Op2.link(this);
+
+ ASSERT(op2IsInRegisters);
+
+ if (!op1IsInRegisters)
+ emitLoadPayload(op1, regT0);
+
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ // Verify op2 is double.
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
+
+ // Do the math.
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op2, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op2, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op2, fpRegT2);
+ subDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_div:
+ emitLoadDouble(op2, fpRegT2);
+ divDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_jnless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), dst + 3);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), dst + 3);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ end.link(this);
+}
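One subtlety: for the comparison opcodes (op_jnless, op_jnlesseq) the dst parameter is not a register index at all; callers pass the bytecode jump target, which is why the addJump calls above use dst + 3. The two calling conventions, mirroring the callers in this file:

    // Arithmetic: dst names the result register.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);

    // Comparison: 'dst' carries the jump target instead.
    emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(),
                       notInt32Op1, notInt32Op2);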
+
+// Multiplication (*)
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ move(regT0, regT3);
+ addSlowCase(branchMul32(Overflow, regT2, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ Jump overflow = getSlowCase(iter); // overflow check
+ linkSlowCase(iter); // zero result check
+
+ Jump negZero = branchOr32(Signed, regT2, regT3);
+ emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
+
+ negZero.link(this);
+ overflow.link(this);
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ }
+
+ if (supportsFloatingPoint()) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ Label jitStubCall(this);
+ JITStubCall stubCall(this, cti_op_mul);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
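The zero-result slow case above exists because an int32 product of 0 is ambiguous: if either operand was negative, JS requires -0, which only a double can represent. branchOr32(Signed, regT2, regT3) tests exactly that, regT3 holding the saved op1 payload. A host-side model (illustrative):

    #include <cstdint>

    // Model of the op_mul fast-path checks.
    double jsMul(int32_t a, int32_t b)
    {
        int64_t wide = static_cast<int64_t>(a) * b;
        if (wide != static_cast<int32_t>(wide))
            return static_cast<double>(wide);  // overflow: double result
        if (wide == 0 && (a | b) < 0)
            return -0.0;                       // negative zero needs a double
        return static_cast<double>(static_cast<int32_t>(wide));
    }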
+
+// Division (/)
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(jump());
+ return;
+ }
+
+ // Int32 divide.
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ JumpList end;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT0, fpRegT0);
+ convertInt32ToDouble(regT2, fpRegT1);
+ divDouble(fpRegT1, fpRegT0);
+
+ JumpList doubleResult;
+ if (!isOperandConstantImmediateInt(op1) || getConstantOperand(op1).asInt32() > 1) {
+ m_assembler.cvttsd2si_rr(fpRegT0, regT0);
+ convertInt32ToDouble(regT0, fpRegT1);
+ m_assembler.ucomisd_rr(fpRegT1, fpRegT0);
+
+ doubleResult.append(m_assembler.jne());
+ doubleResult.append(m_assembler.jp());
+
+ doubleResult.append(branchTest32(Zero, regT0));
+
+ // Int32 result.
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+ end.append(jump());
+ }
+
+ // Double result.
+ doubleResult.link(this);
+ emitStoreDouble(dst, fpRegT0);
+ end.append(jump());
+
+ // Double divide.
+ emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
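The cvttsd2si/ucomisd sequence above decides whether the double quotient may be stored as an int32: truncate it, convert back, and compare. jne catches a fractional result, jp catches the unordered (NaN) case, and the explicit zero test catches -0. Conceptually (illustrative; the cast stands in for cvttsd2si and assumes a finite quotient):

    // May a division result take the int32 fast path?
    bool quotientIsInt32(double q)
    {
        int32_t truncated = static_cast<int32_t>(q); // cvttsd2si
        if (static_cast<double>(truncated) != q)      // jne (and jp for NaN)
            return false;
        return truncated != 0;                        // a zero might be -0
    }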
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter);
+ else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Mod (%)
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ emitLoad(op1, X86::edx, X86::eax);
+ move(Imm32(getConstantOperand(op2).asInt32()), X86::ecx);
+ addSlowCase(branch32(NotEqual, X86::edx, Imm32(JSValue::Int32Tag)));
+ if (getConstantOperand(op2).asInt32() == -1)
+ addSlowCase(branch32(Equal, X86::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ } else {
+ emitLoad2(op1, X86::edx, X86::eax, op2, X86::ebx, X86::ecx);
+ addSlowCase(branch32(NotEqual, X86::edx, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, X86::ebx, Imm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, X86::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, X86::ecx, Imm32(0))); // divide by 0
+ }
+
+ move(X86::eax, X86::ebx); // Save dividend payload, in case of 0.
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86::ecx);
+
+ // If the remainder is zero and the dividend is negative, the result is -0.
+ Jump storeResult1 = branchTest32(NonZero, X86::edx);
+ Jump storeResult2 = branchTest32(Zero, X86::ebx, Imm32(0x80000000)); // not negative
+ emitStore(dst, jsNumber(m_globalData, -0.0));
+ Jump end = jump();
+
+ storeResult1.link(this);
+ storeResult2.link(this);
+ emitStoreInt32(dst, X86::edx, (op1 == dst || op2 == dst));
+ end.link(this);
+}
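Both guarded operand patterns would otherwise fault inside idivl: a zero divisor, and INT32_MIN % -1, whose intermediate quotient overflows 32 bits (the EXC_ARITHMETIC noted above). The remainder checks afterwards handle JS's signed zero: the sign of % follows the dividend, so a zero remainder from a negative dividend must be stored as -0. For example (illustrative):

    #include <cstdint>
    #include <limits>

    // Operands the fast path must route to the stub before idivl runs.
    bool modNeedsSlowPath(int32_t dividend, int32_t divisor)
    {
        return divisor == 0
            || (dividend == std::numeric_limits<int32_t>::min() && divisor == -1);
    }
    // JS: -5 % 5 evaluates to -0, hence the negative-dividend test on the
    // saved payload in X86::ebx above.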
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ linkSlowCase(iter); // int32 check
+ if (getConstantOperand(op2).asInt32() == -1)
+ linkSlowCase(iter); // 0x80000000 check
+ } else {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+        linkSlowCase(iter); // 0x80000000 check
+        linkSlowCase(iter); // 0 check
+ }
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+#else // PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+}
+
+#endif // PLATFORM(X86) || PLATFORM(X86_64)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+#else // USE(JSVALUE32_64)
+
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
@@ -64,7 +1148,7 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
and32(Imm32(0x1f), regT2);
#endif
lshift32(regT2, regT0);
-#if !USE(ALTERNATE_JSIMMEDIATE)
+#if !USE(JSVALUE64)
addSlowCase(branchAdd32(Overflow, regT0, regT0));
signExtend32ToPtr(regT0, regT0);
#endif
@@ -78,7 +1162,7 @@ void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEnt
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
UNUSED_PARAM(op1);
UNUSED_PARAM(op2);
linkSlowCase(iter);
@@ -92,7 +1176,7 @@ void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEnt
notImm1.link(this);
notImm2.link(this);
#endif
- JITStubCall stubCall(this, JITStubs::cti_op_lshift);
+ JITStubCall stubCall(this, cti_op_lshift);
stubCall.addArgument(regT0);
stubCall.addArgument(regT2);
stubCall.call(result);
@@ -109,7 +1193,7 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
// Mask with 0x1f as per ecma-262 11.7.2 step 7.
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
@@ -118,16 +1202,17 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
emitGetVirtualRegisters(op1, regT0, op2, regT2);
if (supportsFloatingPointTruncate()) {
Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
- // supportsFloatingPoint() && USE(ALTERNATE_JSIMMEDIATE) => 3 SlowCases
+#if USE(JSVALUE64)
+ // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
#else
- // supportsFloatingPoint() && !USE(ALTERNATE_JSIMMEDIATE) => 5 SlowCases (of which 1 IfNotJSCell)
+ // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
emitJumpSlowCaseIfNotJSCell(regT0, op1);
addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
- loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
addSlowCase(branchAdd32(Overflow, regT0, regT0));
#endif
@@ -144,13 +1229,13 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
and32(Imm32(0x1f), regT2);
#endif
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
rshift32(regT2, regT0);
#else
rshiftPtr(regT2, regT0);
#endif
}
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
emitFastArithIntToImmNoCheck(regT0, regT0);
#else
orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
@@ -164,7 +1249,7 @@ void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEnt
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- JITStubCall stubCall(this, JITStubs::cti_op_rshift);
+ JITStubCall stubCall(this, cti_op_rshift);
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
@@ -172,7 +1257,7 @@ void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.addArgument(op2, regT2);
} else {
if (supportsFloatingPointTruncate()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
@@ -213,7 +1298,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
@@ -222,7 +1307,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
@@ -252,7 +1337,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
@@ -262,10 +1347,10 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
fail1 = emitJumpIfNotJSCell(regT0);
Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif
- int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
+ int32_t op2imm = getConstantOperand(op2).asInt32();
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
@@ -274,7 +1359,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
#else
if (!m_codeBlock->isKnownNotImmediate(op1))
@@ -283,7 +1368,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
#endif
}
- JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ JITStubCall stubCall(this, cti_op_jless);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
@@ -293,7 +1378,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT1, fpRegT1);
@@ -303,10 +1388,10 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
fail1 = emitJumpIfNotJSCell(regT1);
Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
- int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
+ int32_t op1imm = getConstantOperand(op1).asInt32();
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
@@ -315,7 +1400,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
#else
if (!m_codeBlock->isKnownNotImmediate(op2))
@@ -324,7 +1409,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
#endif
}
- JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ JITStubCall stubCall(this, cti_op_jless);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call();
@@ -334,7 +1419,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
@@ -353,15 +1438,15 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
- loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
fail2.link(this);
fail3.link(this);
@@ -376,7 +1461,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
}
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ JITStubCall stubCall(this, cti_op_jless);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
@@ -398,7 +1483,7 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
@@ -407,7 +1492,7 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
@@ -437,7 +1522,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
@@ -447,10 +1532,10 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
fail1 = emitJumpIfNotJSCell(regT0);
Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif
- int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
+ int32_t op2imm = getConstantOperand(op2).asInt32();
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
@@ -459,7 +1544,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
#else
if (!m_codeBlock->isKnownNotImmediate(op1))
@@ -468,7 +1553,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
#endif
}
- JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ JITStubCall stubCall(this, cti_op_jlesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
@@ -478,7 +1563,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT1, fpRegT1);
@@ -488,10 +1573,10 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
fail1 = emitJumpIfNotJSCell(regT1);
Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
- int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
+ int32_t op1imm = getConstantOperand(op1).asInt32();
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
@@ -500,7 +1585,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
#else
if (!m_codeBlock->isKnownNotImmediate(op2))
@@ -509,7 +1594,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
#endif
}
- JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ JITStubCall stubCall(this, cti_op_jlesseq);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call();
@@ -519,7 +1604,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
@@ -538,15 +1623,15 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
- loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
fail2.link(this);
fail3.link(this);
@@ -561,7 +1646,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
}
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ JITStubCall stubCall(this, cti_op_jlesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
@@ -578,7 +1663,7 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t imm = getConstantOperandImmediateInt(op1);
andPtr(Imm32(imm), regT0);
if (imm >= 0)
@@ -589,7 +1674,7 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
} else if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t imm = getConstantOperandImmediateInt(op2);
andPtr(Imm32(imm), regT0);
if (imm >= 0)
@@ -613,17 +1698,17 @@ void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (isOperandConstantImmediateInt(op1)) {
- JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ JITStubCall stubCall(this, cti_op_bitand);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT0);
stubCall.call(result);
} else if (isOperandConstantImmediateInt(op2)) {
- JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ JITStubCall stubCall(this, cti_op_bitand);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
} else {
- JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ JITStubCall stubCall(this, cti_op_bitand);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call(result);
@@ -638,7 +1723,7 @@ void JIT::emit_op_post_inc(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
emitFastArithIntToImmNoCheck(regT1, regT1);
#else
@@ -656,7 +1741,7 @@ void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
+ JITStubCall stubCall(this, cti_op_post_inc);
stubCall.addArgument(regT0);
stubCall.addArgument(Imm32(srcDst));
stubCall.call(result);
@@ -670,7 +1755,7 @@ void JIT::emit_op_post_dec(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchSub32(Zero, Imm32(1), regT1));
emitFastArithIntToImmNoCheck(regT1, regT1);
#else
@@ -688,7 +1773,7 @@ void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
+ JITStubCall stubCall(this, cti_op_post_dec);
stubCall.addArgument(regT0);
stubCall.addArgument(Imm32(srcDst));
stubCall.call(result);
@@ -700,7 +1785,7 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
emitFastArithIntToImmNoCheck(regT0, regT0);
#else
@@ -718,7 +1803,7 @@ void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEn
linkSlowCase(iter);
emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_pre_inc);
+ JITStubCall stubCall(this, cti_op_pre_inc);
stubCall.addArgument(regT0);
stubCall.call(srcDst);
}
@@ -729,7 +1814,7 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchSub32(Zero, Imm32(1), regT0));
emitFastArithIntToImmNoCheck(regT0, regT0);
#else
@@ -747,7 +1832,7 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
linkSlowCase(iter);
emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_pre_dec);
+ JITStubCall stubCall(this, cti_op_pre_dec);
stubCall.addArgument(regT0);
stubCall.call(srcDst);
}
@@ -765,7 +1850,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
m_assembler.cdq();
m_assembler.idivl_r(X86::ecx);
@@ -784,7 +1869,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
{
unsigned result = currentInstruction[1].u.operand;
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
@@ -797,7 +1882,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
notImm1.link(this);
notImm2.link(this);
#endif
- JITStubCall stubCall(this, JITStubs::cti_op_mod);
+ JITStubCall stubCall(this, cti_op_mod);
stubCall.addArgument(X86::eax);
stubCall.addArgument(X86::ecx);
stubCall.call(result);
@@ -811,7 +1896,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- JITStubCall stubCall(this, JITStubs::cti_op_mod);
+ JITStubCall stubCall(this, cti_op_mod);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -826,64 +1911,9 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
/* ------------------------------ END: OP_MOD ------------------------------ */
-#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+#if USE(JSVALUE64)
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, JITStubs::cti_op_add);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-}
-
-void JIT::emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, JITStubs::cti_op_mul);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-}
-
-void JIT::emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, JITStubs::cti_op_sub);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-}
-
-void JIT::emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
-
-/* ------------------------------ BEGIN: USE(ALTERNATE_JSIMMEDIATE) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
@@ -916,7 +1946,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitGetVirtualRegister(op1, regT0);
Label stubFunctionCall(this);
- JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(result);
@@ -967,7 +1997,7 @@ void JIT::emit_op_add(Instruction* currentInstruction)
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -999,7 +2029,7 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1044,7 +2074,7 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
linkSlowCase(iter);
// There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, cti_op_mul);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1074,9 +2104,9 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
}
-#else // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+#else // USE(JSVALUE64)
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+/* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
@@ -1105,7 +2135,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
}
// (1a) if we get here, src1 is also a number cell
- loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
Jump loadedDouble = jump();
// (1b) if we get here, src1 is an immediate
op1imm.link(this);
@@ -1114,16 +2144,16 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
// (1c)
loadedDouble.link(this);
if (opcodeID == op_add)
- addDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
else if (opcodeID == op_sub)
- subDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
else {
ASSERT(opcodeID == op_mul);
- mulDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
}
// Store the result to the JSNumberCell and jump.
- storeDouble(fpRegT0, Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)));
+ storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
move(regT1, regT0);
emitPutVirtualRegister(dst);
wasJSNumberCell2 = jump();
@@ -1151,7 +2181,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
}
// (1a) if we get here, src2 is also a number cell
- loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
Jump loadedDouble = jump();
// (1b) if we get here, src2 is an immediate
op2imm.link(this);
@@ -1159,7 +2189,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
convertInt32ToDouble(regT1, fpRegT1);
// (1c)
loadedDouble.link(this);
- loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
if (opcodeID == op_add)
addDouble(fpRegT1, fpRegT0);
else if (opcodeID == op_sub)
@@ -1168,11 +2198,11 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
ASSERT(opcodeID == op_mul);
mulDouble(fpRegT1, fpRegT0);
}
- storeDouble(fpRegT0, Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)));
+ storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
emitPutVirtualRegister(dst);
// Store the result to the JSNumberCell and jump.
- storeDouble(fpRegT0, Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)));
+ storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
emitPutVirtualRegister(dst);
wasJSNumberCell1 = jump();
@@ -1243,7 +2273,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
if (opcodeID == op_mul)
linkSlowCase(iter);
- JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
stubCall.addArgument(src1, regT2);
stubCall.addArgument(src2, regT2);
stubCall.call(dst);
@@ -1272,7 +2302,7 @@ void JIT::emit_op_add(Instruction* currentInstruction)
if (types.first().mightBeNumber() && types.second().mightBeNumber())
compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
else {
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1291,7 +2321,7 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT0);
stubCall.call(result);
@@ -1300,7 +2330,7 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1350,7 +2380,7 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
linkSlowCase(iter);
// There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, cti_op_mul);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1368,10 +2398,12 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
-#endif // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+#endif // USE(JSVALUE64)
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
+#endif // USE(JSVALUE32_64)
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
index cf852be..7fdb845 100644
--- a/JavaScriptCore/jit/JITCall.cpp
+++ b/JavaScriptCore/jit/JITCall.cpp
@@ -45,11 +45,401 @@ using namespace std;
namespace JSC {
+#if USE(JSVALUE32_64)
+
void JIT::compileOpCallInitializeCallFrame()
{
+ // regT0 holds callee, regT1 holds argCount
store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_data) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // scopeChain
+
+ emitStore(static_cast<unsigned>(RegisterFile::OptionalCalleeArguments), JSValue());
+ storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
+ storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
+}
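+
A sketch of the header slots this routine fills in, inferred from the stores above rather than from any authoritative layout: each named RegisterFile entry is one Register-sized slot in the new frame.

    // Inferred header writes performed by compileOpCallInitializeCallFrame:
    //   ArgumentCount           <- regT1 (argCount)
    //   OptionalCalleeArguments <- JSValue()   (cleared)
    //   Callee                  <- regT0 (callee)
    //   ScopeChain              <- loaded from the callee's m_data.m_node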
+
+void JIT::compileOpCallSetupArgs(Instruction* instruction)
+{
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitPutJITStubArgConstant(registerOffset, 3);
+ emitPutJITStubArgConstant(argCount, 5);
+}
+
+void JIT::compileOpConstructSetupArgs(Instruction* instruction)
+{
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+ int proto = instruction[5].u.operand;
+ int thisRegister = instruction[6].u.operand;
+
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitPutJITStubArgConstant(registerOffset, 3);
+ emitPutJITStubArgConstant(argCount, 5);
+ emitPutJITStubArgFromVirtualRegister(proto, 7, regT2, regT3);
+ emitPutJITStubArgConstant(thisRegister, 9);
+}
+
+void JIT::compileOpCallVarargsSetupArgs(Instruction*)
+{
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitPutJITStubArg(regT3, 3); // registerOffset
+ emitPutJITStubArg(regT2, 5); // argCount
+}
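
The argument indices used by these three setup helpers (1, 2, 3, 5, 7, 9) are consistent with each stub argument occupying an eight-byte slot in this 32-bit value representation; this is an inference from the pokes, not documented behaviour. A JSValue fills both words of its pair, while an int uses only the low word:

    // Inferred stub argument layout (two 32-bit words per argument):
    //   words 1-2: callee (payload, tag)    words 3-4: registerOffset (int, pad)
    //   words 5-6: argCount (int, pad)      words 7-8: proto (payload, tag)
    //   words 9-10: thisRegister (int, pad)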
+
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCountRegister = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ emitLoad(callee, regT1, regT0);
+ emitLoadPayload(argCountRegister, regT2); // argCount
+ addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
+
+ compileOpCallVarargsSetupArgs(instruction);
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ mul32(Imm32(sizeof(Register)), regT3, regT3);
+ addPtr(callFrameRegister, regT3);
+ storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
+ move(regT3, callFrameRegister);
+
+ move(regT2, regT1); // argCount
+
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+ emitStore(dst, regT1, regT0);
+
+ sampleCodeBlock(m_codeBlock);
+}
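
A hedged restatement of the "speculative roll" above, assuming a Register is one eight-byte slot in this representation: the callee frame begins argCount + registerOffset slots above the caller frame, and the caller frame pointer is stashed in the new frame's CallerFrame slot so the arity-check path inside ctiVirtualCall can presumably unwind if the speculation fails.

    // Pseudocode for the frame roll (arithmetic in Register units):
    //   Register* newFrame = oldFrame + argCount + registerOffset;
    //   newFrame[RegisterFile::CallerFrame] = oldFrame;
    //   callFrameRegister = newFrame;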
+
+void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_call_NotJSFunction);
+ stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_call_varargs), dst, regT1, regT0);
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ // We could JIT-generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_ret_scopeChain).call();
+
+ emitLoad(dst, regT1, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
+void JIT::emit_op_construct_verify(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ emitLoad(dst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
+}
+
+void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitLoad(src, regT1, regT0);
+ emitStore(dst, regT1, regT0);
+}
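
For context, a sketch of the check op_construct_verify implements (per [[Construct]] semantics; constructResult is a hypothetical summary, not the patch's code): dst initially holds the constructor's return value, and if that value is not a cell of ObjectType, the slow case above replaces it with the freshly created this-object in src.

    // Hypothetical summary of the fast/slow paths above.
    JSValue constructResult(JSValue returned, JSValue thisObject)
    {
        return returned.isObject() ? returned : thisObject; // slow case stores thisObject
    }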
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+{
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+ // Stores a naked int32 in the register file.
+ store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
+}
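
A brief inference about the unboxed store above: this slot never receives a tag word, and compileOpCallVarargs reads it back with emitLoadPayload(argCountRegister, ...), so only the payload half of the Register is ever meaningful for argCountDst.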
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ Jump wasEval1;
+ Jump wasEval2;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval1 = branchTest32(NonZero, regT0);
+ wasEval2 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ }
+
+ emitLoad(callee, regT1, regT2);
+
+ if (opcodeID == op_call)
+ compileOpCallSetupArgs(instruction);
+ else if (opcodeID == op_construct)
+ compileOpConstructSetupArgs(instruction);
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // First, in the case of a construct, allocate the new object.
+ if (opcodeID == op_construct) {
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitLoad(callee, regT1, regT2);
+ }
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+ if (opcodeID == op_call_eval) {
+ wasEval1.link(this);
+ wasEval2.link(this);
+ }
+
+ emitStore(dst, regT1, regT0);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ Jump wasEval1;
+ Jump wasEval2;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval1 = branchTest32(NonZero, regT0);
+ wasEval2 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ }
+
+ emitLoad(callee, regT1, regT0);
+
+ DataLabelPtr addressOfLinkedFunctionCheck;
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
+ addSlowCase(jumpToSlow);
+ ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ // The following is the fast case, only used when a callee can be linked.
+
+ // In the case of OpConstruct, call out to a cti_ function to create the new object.
+ if (opcodeID == op_construct) {
+ int proto = instruction[5].u.operand;
+ int thisRegister = instruction[6].u.operand;
+
+ JITStubCall stubCall(this, cti_op_construct_JSConstruct);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
+ stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
+ stubCall.addArgument(proto);
+ stubCall.call(thisRegister);
+
+ emitLoad(callee, regT1, regT0);
+ }
+
+ // Fast version of stack frame initialization, written directly relative to the call frame register (edi on x86).
+ // Note that this does not set up RegisterFile::CodeBlock, which is set up in the callee.
+ emitStore(registerOffset + RegisterFile::OptionalCalleeArguments, JSValue());
+ emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+ store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
+ storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
+
+ // Call to the callee
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ if (opcodeID == op_call_eval) {
+ wasEval1.link(this);
+ wasEval2.link(this);
+ }
+
+ // Put the return value in dst. In the interpreter, op_ret does this.
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + opcodeLengths[opcodeID], dst, regT1, regT0);
+
+ sampleCodeBlock(m_codeBlock);
+}
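
A sketch of the linking protocol the hot path participates in, inferred from branchPtrWithPatch and the slow case below rather than spelled out here: the compare is emitted against a null placeholder, so the first execution always takes the slow path; the ctiVirtualCallLink stub is then expected to patch addressOfLinkedFunctionCheck with the callee JSFunction pointer and point hotPathOther at the callee's entry, after which repeat calls to the same callee never leave the fast path.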
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ // The arguments have been set up on the hot path for op_call_eval
+ if (opcodeID == op_call)
+ compileOpCallSetupArgs(instruction);
+ else if (opcodeID == op_construct)
+ compileOpConstructSetupArgs(instruction);
+
+ // Fast check for JS function.
+ Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
+
+ // First, in the case of a construct, allocate the new object.
+ if (opcodeID == op_construct) {
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitLoad(callee, regT1, regT0);
+ }
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
+
+ // Put the return value in dst.
+ emitStore(dst, regT1, regT0);
+ sampleCodeBlock(m_codeBlock);
+
+ // If not, we need an extra case in the if below!
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+
+ // Done! - return back to the hot path.
+ if (opcodeID == op_construct)
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
+ else
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
+
+ // This handles host functions
+ callLinkFailNotObject.link(this);
+ callLinkFailNotJSFunction.link(this);
+ JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
+
+ emitStore(dst, regT1, regT0);
+ sampleCodeBlock(m_codeBlock);
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+
+#else // USE(JSVALUE32_64)
+
+void JIT::compileOpCallInitializeCallFrame()
+{
+ store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
storePtr(regT2, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
@@ -128,7 +518,7 @@ void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCase
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_call_NotJSFunction);
+ JITStubCall stubCall(this, cti_op_call_NotJSFunction);
stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
sampleCodeBlock(m_codeBlock);
@@ -148,7 +538,11 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
// Handle eval
Jump wasEval;
if (opcodeID == op_call_eval) {
- CallEvalJITStub(this, instruction).call();
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee, regT2);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
}
@@ -165,7 +559,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- JITStubCall(this, JITStubs::cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
emitGetVirtualRegister(callee, regT2);
}
@@ -191,7 +585,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction);
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
sampleCodeBlock(m_codeBlock);
@@ -211,7 +605,11 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Handle eval
Jump wasEval;
if (opcodeID == op_call_eval) {
- CallEvalJITStub(this, instruction).call();
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee, regT2);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
}
@@ -233,7 +631,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
emitPutJITStubArg(regT2, 1);
emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
- JITStubCall stubCall(this, JITStubs::cti_op_construct_JSConstruct);
+ JITStubCall stubCall(this, cti_op_construct_JSConstruct);
stubCall.call(thisRegister);
emitGetVirtualRegister(callee, regT2);
}
@@ -242,7 +640,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_data) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
@@ -281,64 +679,36 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- JITStubCall(this, JITStubs::cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
emitGetVirtualRegister(callee, regT2);
}
- move(Imm32(argCount), regT1);
-
// Speculatively roll the callframe, assuming argCount will match the arity.
storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCallPreLink());
-
- Jump storeResultForFirstRun = jump();
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
- // This is the address for the cold path *after* the first run (which tries to link the call).
- m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);
+ // Put the return value in dst.
+ emitPutVirtualRegister(dst);
+ sampleCodeBlock(m_codeBlock);
- // The arguments have been set up on the hot path for op_call_eval
- if (opcodeID == op_call)
- compileOpCallSetupArgs(instruction);
- else if (opcodeID == op_construct)
- compileOpConstructSetupArgs(instruction);
+ // If not, we need an extra case in the if below!
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
- // Check for JSFunctions.
- Jump isNotObject = emitJumpIfNotJSCell(regT2);
- Jump isJSFunction = branchPtr(Equal, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr));
+ // Done! - return back to the hot path.
+ if (opcodeID == op_construct)
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
+ else
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
// This handles host functions
- isNotObject.link(this);
callLinkFailNotObject.link(this);
callLinkFailNotJSFunction.link(this);
- JITStubCall stubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction);
- stubCall.call();
- Jump wasNotJSFunction = jump();
-
- // Next, handle JSFunctions...
- isJSFunction.link(this);
+ JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
- // First, in the case of a construct, allocate the new object.
- if (opcodeID == op_construct) {
- JITStubCall stubCall(this, JITStubs::cti_op_construct_JSConstruct);
- stubCall.call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, regT2);
- }
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
-
- // Put the return value in dst. In the interpreter, op_ret does this.
- wasNotJSFunction.link(this);
- storeResultForFirstRun.link(this);
emitPutVirtualRegister(dst);
-
sampleCodeBlock(m_codeBlock);
}
@@ -346,6 +716,8 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+#endif // USE(JSVALUE32_64)
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITCode.h b/JavaScriptCore/jit/JITCode.h
index 7ee644b..69cf167 100644
--- a/JavaScriptCore/jit/JITCode.h
+++ b/JavaScriptCore/jit/JITCode.h
@@ -76,20 +76,19 @@ namespace JSC {
// Execute the code!
inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValue* exception)
{
- return JSValue::decode(ctiTrampoline(
-#if PLATFORM(X86_64)
- 0, 0, 0, 0, 0, 0,
-#endif
- m_ref.m_code.executableAddress(), registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ return JSValue::decode(ctiTrampoline(m_ref.m_code.executableAddress(), registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ }
+
+ void* start()
+ {
+ return m_ref.m_code.dataLocation();
}
-#ifndef NDEBUG
size_t size()
{
ASSERT(m_ref.m_code.executableAddress());
return m_ref.m_size;
}
-#endif
ExecutablePool* getExecutablePool()
{
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index b3dc418..b5aaafc 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -30,66 +30,9 @@
#if ENABLE(JIT)
-#if PLATFORM(WIN)
-#undef FIELD_OFFSET // Fix conflict with winnt.h.
-#endif
-
-// FIELD_OFFSET: Like the C++ offsetof macro, but you can use it with classes.
-// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
-// NULL can cause compiler problems, especially in cases of multiple inheritance.
-#define FIELD_OFFSET(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
-
namespace JSC {
-ALWAYS_INLINE void JIT::killLastResultRegister()
-{
- m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
-}
-
-// get arg puts an arg from the SF register array into a h/w register
-ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
- // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue value = m_codeBlock->getConstant(src);
- move(ImmPtr(JSValue::encode(value)), dst);
- killLastResultRegister();
- return;
- }
-
- if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
- bool atJumpTarget = false;
- while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
- if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
- atJumpTarget = true;
- ++m_jumpTargetsPosition;
- }
-
- if (!atJumpTarget) {
- // The argument we want is already stored in eax
- if (dst != cachedResultRegister)
- move(cachedResultRegister, dst);
- killLastResultRegister();
- return;
- }
- }
-
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
- killLastResultRegister();
-}
-
-ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
-{
- if (src2 == m_lastResultBytecodeRegister) {
- emitGetVirtualRegister(src2, dst2);
- emitGetVirtualRegister(src1, dst1);
- } else {
- emitGetVirtualRegister(src1, dst1);
- emitGetVirtualRegister(src2, dst2);
- }
-}
+/* Deprecated: Please use JITStubCall instead. */
// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
@@ -97,16 +40,22 @@ ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumbe
poke(src, argumentNumber);
}
+/* Deprecated: Please use JITStubCall instead. */
+
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
poke(Imm32(value), argumentNumber);
}
+/* Deprecated: Please use JITStubCall instead. */
+
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
poke(ImmPtr(value), argumentNumber);
}
+/* Deprecated: Please use JITStubCall instead. */
+
ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
peek(dst, argumentNumber);
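
Since the helpers in this block are now marked deprecated, a usage sketch of the JITStubCall replacement, following the pattern used throughout this patch:

    // Preferred pattern: the arguments and the call site live together.
    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1, regT2); // virtual register, loaded via scratch
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);            // writes the stub's return value to `result`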
@@ -118,30 +67,6 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
return m_codeBlock->getConstant(src);
}
-ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
-{
- return getConstantOperand(src).getInt32Fast();
-}
-
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
-{
- return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32Fast();
-}
-
-// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
-{
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue value = m_codeBlock->getConstant(src);
- emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
- } else {
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
- emitPutJITStubArg(scratch, argumentNumber);
- }
-
- killLastResultRegister();
-}
-
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
@@ -155,26 +80,17 @@ ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterF
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
loadPtr(Address(from, entry * sizeof(Register)), to);
+#if !USE(JSVALUE32_64)
killLastResultRegister();
+#endif
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
load32(Address(from, entry * sizeof(Register)), to);
+#if !USE(JSVALUE32_64)
killLastResultRegister();
-}
-
-ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
-{
- storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
- m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
- // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
-}
-
-ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
-{
- storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
- // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
+#endif
}
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
@@ -186,9 +102,9 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
return nakedCall;
}
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if PLATFORM(X86) || PLATFORM(X86_64) || (PLATFORM(ARM) && !PLATFORM_ARM_ARCH(7))
-ALWAYS_INLINE void JIT::preverveReturnAddressAfterCall(RegisterID reg)
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
pop(reg);
}
@@ -203,9 +119,9 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
push(address);
}
-#elif PLATFORM(ARM_V7)
+#elif PLATFORM_ARM_ARCH(7)
-ALWAYS_INLINE void JIT::preverveReturnAddressAfterCall(RegisterID reg)
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
move(linkRegister, reg);
}
@@ -225,21 +141,24 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
- poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
move(stackPointerRegister, firstArgumentRegister);
- poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+#if PLATFORM(ARM) && !PLATFORM_ARM_ARCH(7)
+ move(ctiReturnRegister, ARM::lr);
+#endif
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if PLATFORM(X86)
// Within a trampoline the return address will be on the stack at this point.
addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif PLATFORM(ARM_V7)
+#elif PLATFORM_ARM_ARCH(7)
move(stackPointerRegister, firstArgumentRegister);
#endif
// In the trampoline on x86-64, the first argument register is not overwritten.
@@ -248,12 +167,477 @@ ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
- return branchPtr(NotEqual, Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure));
+ return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
+}
+
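+// checkStructure emits the guard the property-access inline caches rely on:
+// code specialized against a single Structure* takes the slow path whenever
+// the incoming object's structure differs.
+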
+ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ linkSlowCase(iter);
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ const JumpList::JumpVector& jumpVector = jumpList.jumps();
+ size_t size = jumpVector.size();
+ for (size_t i = 0; i < size; ++i)
+ m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
+}
+
+ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
+}
+
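+// A minimal usage sketch (illustrative, not code from this patch): the hot
+// path registers a guard with addSlowCase(); the matching emitSlow_* handler
+// links it with linkSlowCase(iter) and resumes the hot path via
+// emitJumpSlowToHot, whose offset is relative to the current m_bytecodeIndex:
+//
+//   addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag))); // hot
+//   ...
+//   linkSlowCase(iter);                                              // cold
+//   emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mov)); // back past the opcode
+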
+#if ENABLE(SAMPLING_FLAGS)
+ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
+}
+
+ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
+}
+#endif
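+
+// Worked example: flag 1 maps to bit 0 (mask 0x00000001) and flag 32 to bit
+// 31 (mask 0x80000000); setSamplingFlag ORs the mask into
+// SamplingFlags::s_flags and clearSamplingFlag ANDs with its complement, so
+// each of the 32 flags toggles exactly one bit.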
+
+#if ENABLE(SAMPLING_COUNTERS)
+ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
+{
+#if PLATFORM(X86_64) // Or any other 64-bit platform.
+ addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
+#elif PLATFORM(X86) // Or any other little-endian 32-bit platform.
+ intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
+ add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
+ addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
+#else
+#error "SAMPLING_FLAGS not implemented on this platform."
+#endif
+}
+#endif
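+
+// On 32-bit targets the counter is still 64 bits wide: add32 bumps the low
+// word and addWithCarry32 folds any carry into the high word at hiWord
+// (&m_counter + 4 on a little-endian target). E.g. a low word of 0xFFFFFFFF
+// plus 1 wraps to 0 and carries 1 into the high word.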
+
+#if ENABLE(OPCODE_SAMPLING)
+#if PLATFORM(X86_64)
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+}
+#endif
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+#if PLATFORM(X86_64)
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
+ storePtr(ImmPtr(codeBlock), X86::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+}
+#endif
+#endif
+
+#if USE(JSVALUE32_64)
+
+inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
+{
+ return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+}
+
+inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
+{
+ return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+}
+
+inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
+{
+ return Address(base, (index * sizeof(Register)));
+}
+
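+// Layout assumed by tagFor/payloadFor/addressFor (and by the TimesEight
+// indexed loads in op_resolve_global below): on a little-endian target each
+// Register holds one 8-byte JSValue with the payload in the low 4 bytes and
+// the tag in the high 4 bytes, i.e. payloadFor(i) = base + 8 * i and
+// tagFor(i) = base + 8 * i + 4.
+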
+inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
+{
+ RegisterID mappedTag;
+ if (getMappedTag(index, mappedTag)) {
+ move(mappedTag, tag);
+ unmap(tag);
+ return;
+ }
+
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ move(Imm32(getConstantOperand(index).tag()), tag);
+ unmap(tag);
+ return;
+ }
+
+ load32(tagFor(index), tag);
+ unmap(tag);
+}
+
+inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
+{
+ RegisterID mappedPayload;
+ if (getMappedPayload(index, mappedPayload)) {
+ move(mappedPayload, payload);
+ unmap(payload);
+ return;
+ }
+
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ move(Imm32(getConstantOperand(index).payload()), payload);
+ unmap(payload);
+ return;
+ }
+
+ load32(payloadFor(index), payload);
+ unmap(payload);
+}
+
+inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
+{
+ move(Imm32(v.payload()), payload);
+ move(Imm32(v.tag()), tag);
+}
+
+inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
+{
+ ASSERT(tag != payload);
+
+ if (base == callFrameRegister) {
+ ASSERT(payload != base);
+ emitLoadPayload(index, payload);
+ emitLoadTag(index, tag);
+ return;
+ }
+
+    if (payload == base) { // Avoid stomping the base register.
+ load32(tagFor(index, base), tag);
+ load32(payloadFor(index, base), payload);
+ return;
+ }
+
+ load32(payloadFor(index, base), payload);
+ load32(tagFor(index, base), tag);
+}
+
+inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
+{
+ if (isMapped(index1)) {
+ emitLoad(index1, tag1, payload1);
+ emitLoad(index2, tag2, payload2);
+ return;
+ }
+ emitLoad(index2, tag2, payload2);
+ emitLoad(index1, tag1, payload1);
+}
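+
+// emitLoad2 presumably reads the mapped operand (isMapped) first: emitLoad
+// moves the mapped tag/payload registers into the requested destinations, and
+// loading the other operand second keeps those loads from clobbering the
+// mapped values before they are consumed.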
+
+inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ Register& inConstantPool = m_codeBlock->constantRegister(index);
+ loadDouble(&inConstantPool, value);
+ } else
+ loadDouble(addressFor(index), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ Register& inConstantPool = m_codeBlock->constantRegister(index);
+ char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
+ convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+ } else
+ convertInt32ToDouble(payloadFor(index), value);
+}
+
+inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
+{
+ store32(payload, payloadFor(index, base));
+ store32(tag, tagFor(index, base));
+}
+
+inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsInt32)
+ store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsInt32)
+ store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsCell)
+ store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
+{
+ if (!indexIsBool)
+ store32(Imm32(0), payloadFor(index, callFrameRegister));
+ store32(tag, tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
+{
+ storeDouble(value, addressFor(index));
+}
+
+inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
+{
+ store32(Imm32(constant.payload()), payloadFor(index, base));
+ store32(Imm32(constant.tag()), tagFor(index, base));
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ emitStore(dst, jsUndefined());
+}
+
+inline bool JIT::isLabeled(unsigned bytecodeIndex)
+{
+ for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
+ unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
+ if (jumpTarget == bytecodeIndex)
+ return true;
+ if (jumpTarget > bytecodeIndex)
+ return false;
+ }
+ return false;
+}
+
+inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
+{
+ if (isLabeled(bytecodeIndex))
+ return;
+
+ m_mappedBytecodeIndex = bytecodeIndex;
+ m_mappedVirtualRegisterIndex = virtualRegisterIndex;
+ m_mappedTag = tag;
+ m_mappedPayload = payload;
+}
+
+inline void JIT::unmap(RegisterID registerID)
+{
+ if (m_mappedTag == registerID)
+ m_mappedTag = (RegisterID)-1;
+ else if (m_mappedPayload == registerID)
+ m_mappedPayload = (RegisterID)-1;
+}
+
+inline void JIT::unmap()
+{
+ m_mappedBytecodeIndex = (unsigned)-1;
+ m_mappedVirtualRegisterIndex = (unsigned)-1;
+ m_mappedTag = (RegisterID)-1;
+ m_mappedPayload = (RegisterID)-1;
+}
+
+inline bool JIT::isMapped(unsigned virtualRegisterIndex)
+{
+ if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ return true;
+}
+
+inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
+{
+ if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ if (m_mappedPayload == (RegisterID)-1)
+ return false;
+ payload = m_mappedPayload;
+ return true;
+}
+
+inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
+{
+ if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ if (m_mappedTag == (RegisterID)-1)
+ return false;
+ tag = m_mappedTag;
+ return true;
+}
+
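+// The helpers above form a one-entry register cache: after an opcode stores a
+// value, map() records which machine registers will still hold the virtual
+// register's tag/payload at the next bytecode index, so the following opcode
+// can skip the reload; map() refuses to cache across jump targets (isLabeled)
+// since control may arrive there with different register contents. The
+// op_mov emitter later in this patch shows the intended pattern:
+//
+//   emitLoad(src, regT1, regT0);
+//   emitStore(dst, regT1, regT0);
+//   map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+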
+inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
+ addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
+}
+
+inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
+ addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+}
+
+inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
+ linkSlowCase(iter);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+}
+
+ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
+{
+ if (isOperandConstantImmediateInt(op1)) {
+ constant = getConstantOperand(op1).asInt32();
+ op = op2;
+ return true;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ constant = getConstantOperand(op2).asInt32();
+ op = op1;
+ return true;
+ }
+
+ return false;
+}
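+
+// Example: for a comparison like "1 < y", this leaves constant = 1 and op set
+// to the non-constant operand, so callers can emit an immediate operand no
+// matter which side the constant appeared on.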
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
+}
+
+/* Deprecated: Please use JITStubCall instead. */
+
+ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
+{
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue constant = m_codeBlock->getConstant(src);
+ poke(Imm32(constant.payload()), argumentNumber);
+ poke(Imm32(constant.tag()), argumentNumber + 1);
+ } else {
+ emitLoad(src, scratch1, scratch2);
+ poke(scratch2, argumentNumber);
+ poke(scratch1, argumentNumber + 1);
+ }
+}
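+
+// Argument layout assumed here: one JSValue spans two consecutive 32-bit stub
+// argument slots, payload first (argumentNumber) and tag second
+// (argumentNumber + 1).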
+
+#else // USE(JSVALUE32_64)
+
+ALWAYS_INLINE void JIT::killLastResultRegister()
+{
+ m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
+}
+
+// Loads an argument from the stack-frame (SF) register array into a hardware register.
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue value = m_codeBlock->getConstant(src);
+ move(ImmPtr(JSValue::encode(value)), dst);
+ killLastResultRegister();
+ return;
+ }
+
+ if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
+ bool atJumpTarget = false;
+ while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
+ if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
+ atJumpTarget = true;
+ ++m_jumpTargetsPosition;
+ }
+
+ if (!atJumpTarget) {
+            // The argument we want is already in the cached result register.
+ if (dst != cachedResultRegister)
+ move(cachedResultRegister, dst);
+ killLastResultRegister();
+ return;
+ }
+ }
+
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
+ killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
+{
+ if (src2 == m_lastResultBytecodeRegister) {
+ emitGetVirtualRegister(src2, dst2);
+ emitGetVirtualRegister(src1, dst1);
+ } else {
+ emitGetVirtualRegister(src1, dst1);
+ emitGetVirtualRegister(src2, dst2);
+ }
+}
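+
+// The ordering above preserves the last-result cache: if src2 is the cached
+// result it is read first, since loading src1 first would kill the cache (or
+// overwrite cachedResultRegister) and force src2 to be reloaded from memory.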
+
+ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
+{
+ return getConstantOperand(src).asInt32();
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
+{
+ storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
+ m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
return branchTestPtr(Zero, reg, tagMaskRegister);
#else
return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
@@ -274,7 +658,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
@@ -292,13 +676,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
emitJumpSlowCaseIfNotJSCell(reg);
}
-ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
-{
- if (!m_codeBlock->isKnownNotImmediate(vReg))
- linkSlowCase(iter);
-}
-
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
@@ -311,7 +689,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
@@ -320,7 +698,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
return branchPtr(Below, reg, tagTypeNumberRegister);
#else
return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
@@ -344,7 +722,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1,
addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}
-#if !USE(ALTERNATE_JSIMMEDIATE)
+#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
@@ -358,7 +736,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID re
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
emitFastArithIntToImmNoCheck(src, dest);
#else
if (src != dest)
@@ -369,7 +747,7 @@ ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID d
ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
UNUSED_PARAM(reg);
#else
rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
@@ -379,7 +757,7 @@ ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
if (src != dest)
move(src, dest);
orPtr(tagTypeNumberRegister, dest);
@@ -396,88 +774,25 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}
-ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
- m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
-}
-
-ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
- m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
- jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
-}
+/* Deprecated: Please use JITStubCall instead. */
-#if ENABLE(SAMPLING_FLAGS)
-ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
-{
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
-}
-
-ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
+// Puts an argument from the stack-frame (SF) register array onto the stack, as an argument to a context-threaded function.
+ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
-}
-#endif
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue value = m_codeBlock->getConstant(src);
+ emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
+ } else {
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
+ emitPutJITStubArg(scratch, argumentNumber);
+ }
-#if ENABLE(SAMPLING_COUNTERS)
-ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
-{
-#if PLATFORM(X86_64) // Or any other 64-bit plattform.
- addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
-#elif PLATFORM(X86) // Or any other little-endian 32-bit plattform.
- intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
- add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
- addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
-#else
-#error "SAMPLING_FLAGS not implemented on this platform."
-#endif
+ killLastResultRegister();
}
-#endif
-#if ENABLE(OPCODE_SAMPLING)
-#if PLATFORM(X86_64)
-ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
-{
- move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
-}
-#else
-ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
-{
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
-}
-#endif
-#endif
+#endif // USE(JSVALUE32_64)
-#if ENABLE(CODEBLOCK_SAMPLING)
-#if PLATFORM(X86_64)
-ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
-{
- move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
- storePtr(ImmPtr(codeBlock), X86::ecx);
-}
-#else
-ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
-{
- storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
-}
-#endif
-#endif
-}
+} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 1737551..13fc981 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -32,12 +32,1785 @@
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
+#include "JSFunction.h"
+#include "LinkBuffer.h"
namespace JSC {
+#if USE(JSVALUE32_64)
+
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
+{
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // (1) This function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // regT0 holds payload, regT1 holds tag
+
+ Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+
+    // Checks out okay! Get the length from the UString.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(UString::Rep, len)), regT2);
+
+ Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
+ move(regT2, regT0);
+ move(Imm32(JSValue::Int32Tag), regT1);
+
+ ret();
+#endif
+
+ // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ /* VirtualCallLink Trampoline */
+ Label virtualCallLinkBegin = align();
+
+ // regT0 holds callee, regT1 holds argCount.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
+ Jump hasCodeBlock2 = branchTestPtr(NonZero, regT2);
+
+ // Lazily generate a CodeBlock.
+ preserveReturnAddressAfterCall(regT3); // return address
+ restoreArgumentReference();
+ Call callJSFunction2 = call();
+ move(regT0, regT2);
+ emitGetJITStubArg(1, regT0); // callee
+ emitGetJITStubArg(5, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3); // return address
+ hasCodeBlock2.link(this);
+
+ // regT2 holds codeBlock.
+ Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 3); // return address
+ emitPutJITStubArg(regT2, 7); // codeBlock
+ restoreArgumentReference();
+ Call callArityCheck2 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT0); // callee
+ emitGetJITStubArg(5, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3); // return address
+
+ arityCheckOkay2.link(this);
+ isNativeFunc2.link(this);
+
+ compileOpCallInitializeCallFrame();
+
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 3);
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
+
+ /* VirtualCall Trampoline */
+ Label virtualCallBegin = align();
+
+ // regT0 holds callee, regT1 holds argCount.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
+ Jump hasCodeBlock3 = branchTestPtr(NonZero, regT2);
+
+ // Lazily generate a CodeBlock.
+ preserveReturnAddressAfterCall(regT3); // return address
+ restoreArgumentReference();
+ Call callJSFunction1 = call();
+ move(regT0, regT2);
+ emitGetJITStubArg(1, regT0); // callee
+ emitGetJITStubArg(5, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3); // return address
+ hasCodeBlock3.link(this);
+
+ // regT2 holds codeBlock.
+ Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
+    // Check argCount matches callee arity.
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 3); // return address
+ emitPutJITStubArg(regT2, 7); // codeBlock
+ restoreArgumentReference();
+ Call callArityCheck1 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT0); // callee
+ emitGetJITStubArg(5, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3); // return address
+
+ arityCheckOkay3.link(this);
+ isNativeFunc3.link(this);
+ compileOpCallInitializeCallFrame();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
+ jump(regT0);
+
+#if PLATFORM(X86)
+ Label nativeCallThunk = align();
+ preserveReturnAddressAfterCall(regT0);
+ emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
+
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+    /* We use two structs to describe the stack frame we set up for our
+     * call to native code. NativeCallFrameStructure describes how we set up
+     * the stack in advance of the call; NativeFunctionCalleeSignature
+     * describes the call frame as the native code expects it. We need both
+     * because we use the fastcall calling convention, which has the callee
+     * pop its arguments, but not the rest of the call frame, off the stack,
+     * so we need a reliable way to increment the stack pointer by the
+     * right amount after the call.
+
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in EDX
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ JSValue result;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#else
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in ECX
+ // JSObject* callee; // passed in EDX
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#endif
+
+ const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
+ // Allocate system stack frame
+ subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
+
+    // ArgList is passed by reference, so store the address of the in-frame args field into argPointer.
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
+
+ // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
+ storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+    // JSValue is a non-POD type, so it is returned via a hidden pointer; pass the address of the result slot in ecx.
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
+
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
+ storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+
+ // Plant callframe
+ move(callFrameRegister, X86::edx);
+
+ call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ // JSValue is a non-POD type, so eax points to it
+ emitLoad(0, regT1, regT0, X86::eax);
+#else
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx); // callee
+ move(callFrameRegister, X86::ecx); // callFrame
+ call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+#endif
+
+ // We've put a few temporaries on the stack in addition to the actual arguments
+ // so pull them off now
+ addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+
+ // Check for an exception
+ // FIXME: Maybe we can optimize this comparison to JSValue().
+ move(ImmPtr(&globalData->exception), regT2);
+ Jump sawException1 = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::CellTag));
+ Jump sawException2 = branch32(NonZero, payloadFor(0, regT2), Imm32(0));
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);
+
+    // Restore our caller's call frame register.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT3);
+ ret();
+
+ // Handle an exception
+ sawException1.link(this);
+ sawException2.link(this);
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+ // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+#endif
+ patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+#endif
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+#else
+ UNUSED_PARAM(ctiStringLengthTrampoline);
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+#else
+ UNUSED_PARAM(ctiVirtualCallLink);
+#endif
+}
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src))
+ emitStore(dst, getConstantOperand(src));
+ else {
+ emitLoad(src, regT1, regT0);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_end).call();
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target + 1);
+}
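+
+// Note on the "+ 1" here: it matches the index of the target operand, and the
+// same pattern ("+ 2", "+ 3") recurs in every branch opcode below, suggesting
+// the encoded offset is relative to the target operand's own slot and must be
+// rebased onto the instruction's bytecode index, which is what addJump expects.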
+
+void JIT::emit_op_loop(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ emitTimeoutCheck();
+ addJump(jump(), target + 1);
+}
+
+void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitTimeoutCheck();
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, regT2), target + 3);
+}
+
+void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_loop_if_less);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitTimeoutCheck();
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, regT2), target + 3);
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ // Load the operands (baseVal, proto, and value respectively) into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitLoadPayload(proto, regT1);
+ emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(value, regT2);
+
+ // Check that baseVal & proto are cells.
+ emitJumpSlowCaseIfNotJSCell(proto);
+ emitJumpSlowCaseIfNotJSCell(baseVal);
+
+ // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType))); // FIXME: Maybe remove this test.
+ addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance))); // FIXME: TOT checks ImplementsDefaultHasInstance.
+
+ // If value is not an Object, return false.
+ emitLoadTag(value, regT0);
+ Jump valueIsImmediate = branch32(NotEqual, regT0, Imm32(JSValue::CellTag));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)); // FIXME: Maybe remove this test.
+
+ // Check proto is object.
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(Imm32(JSValue::TrueTag), regT0);
+ Label loop(this);
+
+    // Load the prototype of the object in regT2. If this is equal to regT1, value is an instance of proto.
+    // Otherwise, check whether we have hit null: if so, drop out of the loop; if not, go round again.
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ branch32(NotEqual, regT2, Imm32(0), loop);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ valueIsImmediate.link(this);
+ valueIsNotObject.link(this);
+ move(Imm32(JSValue::FalseTag), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitStoreBool(dst, regT0);
+}
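+
+// Semantics sketch: the loop implements "value instanceof baseVal" by walking
+// value's prototype chain looking for proto (baseVal's .prototype, loaded by
+// earlier bytecode); e.g. [] instanceof Array succeeds on the first step
+// because Array.prototype is the array's immediate prototype, while the walk
+// ends with false once a null prototype (payload 0) is reached.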
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_instanceof);
+ stubCall.addArgument(value);
+ stubCall.addArgument(baseVal);
+ stubCall.addArgument(proto);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func);
+ stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[3].u.operand;
+
+ loadPtr(&globalObject->d()->registers, regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ loadPtr(&globalObject->d()->registers, regT2);
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction*)
+{
+ JITStubCall(this, cti_op_tear_off_arguments).call();
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_primitive);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitTimeoutCheck();
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addJump(branch32(NotEqual, regT0, Imm32(0)), target + 2);
+ Jump isNotZero = jump();
+
+ isNotInteger.link(this);
+
+ addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::FalseTag)));
+
+ isNotZero.link(this);
+}
+
+void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_skip);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction)
+{
+ // FIXME: Optimize to use patching instead of so many memory accesses.
+
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = currentInstruction[2].u.jsCell;
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
+
+ // Verify structure.
+ move(ImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
+
+ // Load property.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
+ load32(offsetAddr, regT3);
+ load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
+ load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = currentInstruction[2].u.jsCell;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(ImmPtr(globalObject));
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoadTag(src, regT0);
+
+ xor32(Imm32(JSValue::FalseTag), regT0);
+ addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
+ xor32(Imm32(JSValue::TrueTag), regT0);
+
+ emitStoreBool(dst, regT0, (dst == src));
+}
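+
+// This relies on JSValue::FalseTag and JSValue::TrueTag differing only in the
+// low bit (an encoding assumption the code depends on): xor with FalseTag maps
+// false -> 0 and true -> 1, the masked test (~1) routes every other tag to the
+// slow case, and the final xor with TrueTag turns 0/1 back into
+// TrueTag/FalseTag, i.e. the logical negation of the input.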
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_not);
+ stubCall.addArgument(src);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
+ addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target + 2);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
+ addJump(jump(), target + 2);
+
+ isNotInteger.link(this);
+
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
+ addJump(branchDouble(DoubleEqual, fpRegT0, fpRegT1), target + 2);
+
+ isTrue.link(this);
+ isTrue2.link(this);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // Inverted.
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
+ addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
+ addJump(jump(), target + 2);
+
+ isNotInteger.link(this);
+
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
+ addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target + 2);
+
+ isFalse.link(this);
+ isFalse2.link(this);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ addJump(branchTest32(NonZero, regT1), target + 2);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
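+    // As in op_jeq_null, but branch when the operand is neither null nor
+    // undefined.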
+ set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ addJump(branchTest32(Zero, regT1), target + 2);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target + 3);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target + 3);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
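+    // Plant a placeholder return address; it is patched later, via the
+    // JSRInfo recorded below, with the address of the label after the jump.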
+ DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target + 2);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
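+    // Jump back through the return address planted by op_jsr.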
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
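+    // Fast path only when the tags match and the operands are neither cells
+    // (strings need deep comparison) nor doubles (tag words below LowestTag).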
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+
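+    // set8 leaves 0 or 1 in regT0; OR-ing in FalseTag then yields FalseTag or
+    // TrueTag (this relies on TrueTag being FalseTag with the low bit set).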
+ set8(Equal, regT0, regT2, regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call();
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(op1);
+ stubCallEq.addArgument(op2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ or32(Imm32(JSValue::FalseTag), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+
+ set8(NotEqual, regT0, regT2, regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call(regT0);
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(regT1, regT0);
+ stubCallEq.addArgument(regT3, regT2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ xor32(Imm32(0x1), regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoadTag(src1, regT0);
+ emitLoadTag(src2, regT1);
+
+    // Jump to a slow case if either operand is a double, or if both operands
+    // are cells and/or Int32s.
+ move(regT0, regT2);
+ and32(regT1, regT2);
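+    // With this tag encoding, the AND of the two tags is below LowestTag only
+    // if at least one operand is a double, and at or above CellTag only if
+    // the operands are some mix of cells and int32s.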
+ addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));
+
+ if (type == OpStrictEq)
+ set8(Equal, regT0, regT1, regT0);
+ else
+ set8(NotEqual, regT0, regT1, regT0);
+
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_stricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_nstricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
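+    // A cell compares equal to null only if its Structure has the
+    // MasqueradesAsUndefined flag set.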
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ or32(Imm32(JSValue::FalseTag), regT1);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
+ set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ and32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ or32(Imm32(JSValue::FalseTag), regT1);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_regexp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+ JITStubCall stubCall(this, cti_op_throw);
+ stubCall.addArgument(exception);
+ stubCall.call();
+
+#ifndef NDEBUG
+    // cti_op_throw always changes its return address,
+    // so this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int iter = currentInstruction[2].u.operand;
+ int target = currentInstruction[3].u.operand;
+
+ load32(Address(callFrameRegister, (iter * sizeof(Register))), regT0);
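+    // regT0 now holds the iterator; the stub returns the next property name,
+    // or zero once iteration is complete.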
+
+ JITStubCall stubCall(this, cti_op_next_pname);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+
+ Jump endOfIter = branchTestPtr(Zero, regT0);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_next_pname), dst, regT1, regT0);
+ addJump(jump(), target + 3);
+ endOfIter.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
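+    // Int32s and doubles (tag words below DeletedValueTag) are already
+    // numbers and pass through unchanged; every other tag takes the slow case.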
+ Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::DeletedValueTag)));
+ isInt32.link(this);
+
+ if (src != dst)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_new_scope);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+
+ // This opcode only executes after a return from cti_op_throw.
+
+ // cti_op_throw may have taken us to a call frame further up the stack; reload
+ // the call frame pointer to adjust.
+ peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+
+ // Now store the exception returned by cti_op_throw.
+ emitStore(exception, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // Create a jump table for switch destinations, and track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_imm);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
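+    // The stub returns the native code address for the matching case (or the
+    // default) in regT0; jump to it directly.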
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // Create a jump table for switch destinations, and track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_char);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // Create a jump table for switch destinations, and track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+
+ JITStubCall stubCall(this, cti_op_switch_string);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_new_error(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned type = currentInstruction[2].u.operand;
+ unsigned message = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_new_error);
+ stubCall.addArgument(Imm32(type));
+ stubCall.addArgument(m_codeBlock->getConstant(message));
+ stubCall.addArgument(Imm32(m_bytecodeIndex));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call();
+}
+
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though JIT code doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ for (int i = 0; i < m_codeBlock->m_numVars; ++i)
+ emitStore(i, jsUndefined());
+}
+
+void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+{
+ emit_op_enter(currentInstruction);
+
+ JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_create_arguments(Instruction*)
+{
+ Jump argsNotCell = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::CellTag));
+ Jump argsNotNull = branchTestPtr(NonZero, payloadFor(RegisterFile::ArgumentsRegister, callFrameRegister));
+
+ // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation.
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+
+ argsNotCell.link(this);
+ argsNotNull.link(this);
+}
+
+void JIT::emit_op_init_arguments(Instruction*)
+{
+ emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ emitLoad(thisRegister, regT1, regT0);
+
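+    // 'this' is used as-is when it is a cell whose Structure does not require
+    // conversion; a non-cell or a NeedsThisConversion structure goes slow.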
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
+}
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_convert_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(thisRegister);
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
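+    // enabledProfilerReference points at the global Profiler*; skip the stub
+    // call entirely when no profiler is enabled.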
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+#else // USE(JSVALUE32_64)
+
#define RECORD_JUMP_TARGET(targetOffset) \
do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
+{
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // (2) The second function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // Check eax is a string
+ Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+
+    // Checks out okay! Get the length from the UString.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
+ load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);
+
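+    // Lengths above maxImmediateInt cannot be boxed as an immediate int, so
+    // fail over to the generic get_by_id path.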
+ Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
+
+    // regT0 contains a 64-bit value (positive and zero-extended), so we don't need to sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+
+ ret();
+#endif
+
+ // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
+
+ Label virtualCallLinkBegin = align();
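+    // Compile the callee lazily if it has no CodeBlock yet, check that the
+    // argument count matches the callee's arity, then call the lazy-link stub
+    // to patch the call site and jump into the callee.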
+
+ // Load the callee CodeBlock* into eax
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
+ Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction2 = call();
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ hasCodeBlock2.link(this);
+
+ Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT0, 4);
+ restoreArgumentReference();
+ Call callArityCheck2 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ arityCheckOkay2.link(this);
+ isNativeFunc2.link(this);
+
+ compileOpCallInitializeCallFrame();
+
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 2);
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+
+ jump(regT0);
+
+ Label virtualCallBegin = align();
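+    // As above, but with no call-site patching: load the callee's JIT code
+    // pointer and jump to it on every call.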
+
+ // Load the callee CodeBlock* into eax
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
+ Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction1 = call();
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
+ hasCodeBlock3.link(this);
+
+ Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT0, 4);
+ restoreArgumentReference();
+ Call callArityCheck1 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
+ arityCheckOkay3.link(this);
+ isNativeFunc3.link(this);
+
+ // load ctiCode from the new codeBlock.
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
+
+ compileOpCallInitializeCallFrame();
+ jump(regT0);
+
+
+ Label nativeCallThunk = align();
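+    // This thunk adapts the JIT calling convention to the host ABI: it builds
+    // an ArgList on the machine stack, calls the native function through
+    // JSFunction::m_data, and returns the result to JIT code.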
+ preserveReturnAddressAfterCall(regT0);
+ emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
+
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+
+#if PLATFORM(X86_64)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);
+
+ // Allocate stack space for our arglist
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+ COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned);
+
+ // Set up arguments
+ subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in edx
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
+ mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
+ subPtr(X86::ecx, X86::edx);
+
+ // push pointer to arguments
+ storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+
+    // The ArgList is passed by reference, and stackPointerRegister currently points at it
+ move(stackPointerRegister, X86::ecx);
+
+ // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
+ loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);
+
+ move(callFrameRegister, X86::edi);
+
+ call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+#elif PLATFORM(X86)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+    /* We have two structs that we use to describe the stackframe we set up for our
+     * call to native code. NativeCallFrameStructure describes how we set up the stack
+     * in advance of the call. NativeFunctionCalleeSignature describes the callframe
+     * as the native code expects it. We need both because we are using the fastcall
+     * calling convention, which results in the callee popping its arguments off the
+     * stack but not the rest of the callframe, so we need a reliable way to increment
+     * the stack pointer by the right amount after the call.
+     */
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in EDX
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ JSValue result;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#else
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in ECX
+ // JSObject* callee; // passed in EDX
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#endif
+ const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
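+    // (size + 15) & ~15 rounds the frame size up to a multiple of 16 bytes,
+    // keeping the stack aligned across the call.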
+ // Allocate system stack frame
+ subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
+
+    // The ArgList is passed by reference: compute the address of the args member and store it in argPointer
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
+
+ // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
+
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+    // Compute the address of the result slot, which is passed in ecx
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
+
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
+ storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+
+ // Plant callframe
+ move(callFrameRegister, X86::edx);
+
+ call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ // JSValue is a non-POD type
+ loadPtr(Address(X86::eax), X86::eax);
+#else
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);
+
+ // Plant callframe
+ move(callFrameRegister, X86::ecx);
+ call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+#endif
+
+ // We've put a few temporaries on the stack in addition to the actual arguments
+ // so pull them off now
+ addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+
+#elif PLATFORM(ARM) && !PLATFORM_ARM_ARCH(7)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ // Allocate stack space for our arglist
+ COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0, ArgList_should_by_8byte_aligned);
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ move(callFrameRegister, regT1);
+ sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+
+ // push pointer to arguments
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // Setup arg3: regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int32_t)sizeof(Register)), regT2);
+
+ // Setup arg2:
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
+
+ // Setup arg1:
+ move(callFrameRegister, regT0);
+
+ // Setup arg4: This is a plain hack
+ move(stackPointerRegister, ARM::S0);
+
+ move(ctiReturnRegister, ARM::lr);
+ call(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif
+
+ // Check for an exception
+ loadPtr(&(globalData->exception), regT2);
+ Jump exceptionHandler = branchTestPtr(NonZero, regT2);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+    // Restore our caller's call frame ("r").
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+ // Handle an exception
+ exceptionHandler.link(this);
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
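+    // Retarget the return: the ret below lands in ctiVMThrowTrampoline, with
+    // the real return address stashed in exceptionLocation above.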
+ move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+    // All trampolines constructed! Copy the code, link up the calls, and set the pointers on the Machine object.
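+    // LinkBuffer copies the generated code into an executable pool; the Call
+    // and Label handles recorded above remain valid for linking calls and for
+    // computing the trampoline entry points below.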
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+#endif
+ patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+#endif
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+ *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+#else
+ UNUSED_PARAM(ctiStringLengthTrampoline);
+#endif
+}
+
void JIT::emit_op_mov(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -62,7 +1835,7 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
void JIT::emit_op_end(Instruction* currentInstruction)
{
if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, JITStubs::cti_op_end).call();
+ JITStubCall(this, cti_op_end).call();
ASSERT(returnValueRegister != callFrameRegister);
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
@@ -94,7 +1867,7 @@ void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
@@ -103,7 +1876,7 @@ void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
@@ -127,7 +1900,7 @@ void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
@@ -143,7 +1916,7 @@ void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
- JITStubCall(this, JITStubs::cti_op_new_object).call(currentInstruction[1].u.operand);
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -159,18 +1932,18 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT1);
// Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0);
- addSlowCase(branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
- addSlowCase(branchTest32(Zero, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
// If value is not an Object, return false.
Jump valueIsImmediate = emitJumpIfNotJSCell(regT2);
- loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT0);
- Jump valueIsNotObject = branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
// Check proto is object.
- loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT0);
- addSlowCase(branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -180,8 +1953,8 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
- loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);
@@ -197,7 +1970,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
void JIT::emit_op_new_func(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_func);
+ JITStubCall stubCall(this, cti_op_new_func);
stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -214,9 +1987,14 @@ void JIT::emit_op_call_eval(Instruction* currentInstruction)
void JIT::emit_op_load_varargs(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_load_varargs);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+ // Stores a naked int32 in the register file.
+ store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
}
void JIT::emit_op_call_varargs(Instruction* currentInstruction)
@@ -251,9 +2029,9 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
while (skip--)
- loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -265,29 +2043,29 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
while (skip--)
- loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
- loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
}
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_tear_off_activation);
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.call();
}
void JIT::emit_op_tear_off_arguments(Instruction*)
{
- JITStubCall(this, JITStubs::cti_op_tear_off_arguments).call();
+ JITStubCall(this, cti_op_tear_off_arguments).call();
}
void JIT::emit_op_ret(Instruction* currentInstruction)
{
// We could JIT generate the deref, only calling out to C when the refcount hits zero.
if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, JITStubs::cti_op_ret_scopeChain).call();
+ JITStubCall(this, cti_op_ret_scopeChain).call();
ASSERT(callFrameRegister != regT1);
ASSERT(regT1 != returnValueRegister);
@@ -309,7 +2087,7 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_array);
+ JITStubCall stubCall(this, cti_op_new_array);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
@@ -317,7 +2095,7 @@ void JIT::emit_op_new_array(Instruction* currentInstruction)
void JIT::emit_op_resolve(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve);
+ JITStubCall stubCall(this, cti_op_resolve);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -327,8 +2105,8 @@ void JIT::emit_op_construct_verify(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
}
@@ -350,20 +2128,12 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_strcat);
+ JITStubCall stubCall(this, cti_op_strcat);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_func(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_func);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
{
emitTimeoutCheck();
@@ -381,14 +2151,14 @@ void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
};
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_base);
+ JITStubCall stubCall(this, cti_op_resolve_base);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_skip);
+ JITStubCall stubCall(this, cti_op_resolve_skip);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
stubCall.call(currentInstruction[1].u.operand);
@@ -407,11 +2177,11 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction)
// Check Structure of global object
move(ImmPtr(globalObject), regT0);
loadPtr(structureAddress, regT1);
- Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
+ Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // Structures don't match
// Load cached property
// Assume that the global object always uses external storage.
- loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_externalStorage)), regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0);
load32(offsetAddr, regT1);
loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
@@ -419,7 +2189,7 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction)
// Slow case
noMatch.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_global);
+ JITStubCall stubCall(this, cti_op_resolve_global);
stubCall.addArgument(ImmPtr(globalObject));
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
@@ -459,8 +2229,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
@@ -480,8 +2250,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
@@ -505,13 +2275,6 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
RECORD_JUMP_TARGET(target + 3);
}
-void JIT::emit_op_unexpected_load(Instruction* currentInstruction)
-{
- JSValue v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand);
- move(ImmPtr(JSValue::encode(v)), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_jsr(Instruction* currentInstruction)
{
int retAddrDst = currentInstruction[1].u.operand;
@@ -542,7 +2305,7 @@ void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
not32(regT0);
emitFastArithIntToImmNoCheck(regT0, regT0);
#else
@@ -553,7 +2316,7 @@ void JIT::emit_op_bitnot(Instruction* currentInstruction)
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_with_base);
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call(currentInstruction[2].u.operand);
@@ -561,7 +2324,7 @@ void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_func_exp);
+ JITStubCall stubCall(this, cti_op_new_func_exp);
stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -603,7 +2366,7 @@ void JIT::emit_op_bitxor(Instruction* currentInstruction)
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_regexp);
+ JITStubCall stubCall(this, cti_op_new_regexp);
stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -618,7 +2381,7 @@ void JIT::emit_op_bitor(Instruction* currentInstruction)
void JIT::emit_op_throw(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_throw);
+ JITStubCall stubCall(this, cti_op_throw);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.call();
ASSERT(regT0 == returnValueRegister);
@@ -631,7 +2394,7 @@ void JIT::emit_op_throw(Instruction* currentInstruction)
void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_next_pname);
+ JITStubCall stubCall(this, cti_op_next_pname);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.call();
Jump endOfIter = branchTestPtr(Zero, regT0);
@@ -642,14 +2405,37 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_push_scope);
+ JITStubCall stubCall(this, cti_op_push_scope);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_pop_scope(Instruction*)
{
- JITStubCall(this, JITStubs::cti_op_pop_scope).call();
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(src1, regT0, src2, regT1);
+
+ // Jump to a slow case if either operand is a number, or if both are JSCell*s.
+ move(regT0, regT2);
+ orPtr(regT1, regT2);
+ addSlowCase(emitJumpIfJSCell(regT2));
+ addSlowCase(emitJumpIfImmediateNumber(regT2));
+
+ if (type == OpStrictEq)
+ set32(Equal, regT1, regT0, regT0);
+ else
+ set32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_stricteq(Instruction* currentInstruction)
@@ -670,8 +2456,8 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
wasImmediate.link(this);
@@ -680,7 +2466,7 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_push_new_scope);
+ JITStubCall stubCall(this, cti_op_push_new_scope);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
stubCall.call(currentInstruction[1].u.operand);
@@ -689,13 +2475,13 @@ void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
void JIT::emit_op_catch(Instruction* currentInstruction)
{
killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
- peek(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+ peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_jmp_scopes);
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call();
addJump(jump(), currentInstruction[2].u.operand + 2);
@@ -713,7 +2499,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
- JITStubCall stubCall(this, JITStubs::cti_op_switch_imm);
+ JITStubCall stubCall(this, cti_op_switch_imm);
stubCall.addArgument(scrutinee, regT2);
stubCall.addArgument(Imm32(tableIndex));
stubCall.call();
@@ -731,7 +2517,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
- JITStubCall stubCall(this, JITStubs::cti_op_switch_char);
+ JITStubCall stubCall(this, cti_op_switch_char);
stubCall.addArgument(scrutinee, regT2);
stubCall.addArgument(Imm32(tableIndex));
stubCall.call();
@@ -748,7 +2534,7 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
- JITStubCall stubCall(this, JITStubs::cti_op_switch_string);
+ JITStubCall stubCall(this, cti_op_switch_string);
stubCall.addArgument(scrutinee, regT2);
stubCall.addArgument(Imm32(tableIndex));
stubCall.call();
@@ -757,16 +2543,16 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
void JIT::emit_op_new_error(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_error);
+ JITStubCall stubCall(this, cti_op_new_error);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->unexpectedConstant(currentInstruction[3].u.operand))));
+ stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
stubCall.addArgument(Imm32(m_bytecodeIndex));
stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_debug(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_debug);
+ JITStubCall stubCall(this, cti_op_debug);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
@@ -781,8 +2567,8 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ setTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
Jump wasNotImmediate = jump();
@@ -806,8 +2592,8 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ setTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
Jump wasNotImmediate = jump();
@@ -828,7 +2614,7 @@ void JIT::emit_op_enter(Instruction*)
// Even though CTI doesn't use them, we initialize our constant
// registers to zap stale pointers, to avoid unnecessarily prolonging
// object lifetime and increasing GC pressure.
- size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+ size_t count = m_codeBlock->m_numVars;
for (size_t j = 0; j < count; ++j)
emitInitRegister(j);
@@ -839,20 +2625,20 @@ void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
// Even though CTI doesn't use them, we initialize our constant
// registers to zap stale pointers, to avoid unnecessarily prolonging
// object lifetime and increasing GC pressure.
- size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+ size_t count = m_codeBlock->m_numVars;
for (size_t j = 0; j < count; ++j)
emitInitRegister(j);
- JITStubCall(this, JITStubs::cti_op_push_activation).call(currentInstruction[1].u.operand);
+ JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}
void JIT::emit_op_create_arguments(Instruction*)
{
Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, JITStubs::cti_op_create_arguments_no_params).call();
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
else
- JITStubCall(this, JITStubs::cti_op_create_arguments).call();
+ JITStubCall(this, cti_op_create_arguments).call();
argsCreated.link(this);
}
@@ -866,17 +2652,17 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1);
- addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ addSlowCase(branchTest32(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
}
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
- peek(regT1, FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
Jump noProfiler = branchTestPtr(Zero, Address(regT1));
- JITStubCall stubCall(this, JITStubs::cti_op_profile_will_call);
+ JITStubCall stubCall(this, cti_op_profile_will_call);
stubCall.addArgument(currentInstruction[1].u.operand, regT1);
stubCall.call();
noProfiler.link(this);
@@ -885,10 +2671,10 @@ void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
- peek(regT1, FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
Jump noProfiler = branchTestPtr(Zero, Address(regT1));
- JITStubCall stubCall(this, JITStubs::cti_op_profile_did_call);
+ JITStubCall stubCall(this, cti_op_profile_did_call);
stubCall.addArgument(currentInstruction[1].u.operand, regT1);
stubCall.call();
noProfiler.link(this);
@@ -901,7 +2687,7 @@ void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowC
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_convert_this);
+ JITStubCall stubCall(this, cti_op_convert_this);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -918,7 +2704,7 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_to_primitive);
+ JITStubCall stubCall(this, cti_op_to_primitive);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -934,7 +2720,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
emitFastArithIntToImmNoCheck(regT1, regT1);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_get_by_val);
+ JITStubCall stubCall(this, cti_op_get_by_val);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -943,10 +2729,10 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
// This is the slow case that handles accesses to arrays above the fast cut-off.
// First, check if this is an access to the vector
linkSlowCase(iter);
- branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow);
+ branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow);
// okay, missed the fast region, but it is still in the vector. Get the value.
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT2);
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2);
// Check whether the value loaded is zero; if so we need to return undefined.
branchTestPtr(Zero, regT2, beginGetByValSlow);
move(regT2, regT0);
@@ -960,14 +2746,14 @@ void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowC
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ JITStubCall stubCall(this, cti_op_loop_if_less);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
} else if (isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ JITStubCall stubCall(this, cti_op_loop_if_less);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT0);
stubCall.call();
@@ -975,7 +2761,7 @@ void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowC
} else {
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ JITStubCall stubCall(this, cti_op_loop_if_less);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
@@ -989,7 +2775,7 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.call();
@@ -997,7 +2783,7 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
} else {
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
@@ -1014,7 +2800,7 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
emitFastArithIntToImmNoCheck(regT1, regT1);
notImm.link(this); {
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_val);
+ JITStubCall stubCall(this, cti_op_put_by_val);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -1025,7 +2811,7 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
// slow cases for immediate int accesses to arrays
linkSlowCase(iter);
linkSlowCase(iter); {
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_val_array);
+ JITStubCall stubCall(this, cti_op_put_by_val_array);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -1036,7 +2822,7 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(regT0);
stubCall.call();
emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
@@ -1046,7 +2832,7 @@ void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>
{
linkSlowCase(iter);
xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
- JITStubCall stubCall(this, JITStubs::cti_op_not);
+ JITStubCall stubCall(this, cti_op_not);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1054,7 +2840,7 @@ void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>
void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(regT0);
stubCall.call();
emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted!
@@ -1063,7 +2849,7 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEnt
void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_bitnot);
+ JITStubCall stubCall(this, cti_op_bitnot);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1071,7 +2857,7 @@ void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEnt
void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(regT0);
stubCall.call();
emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
@@ -1080,7 +2866,7 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntr
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_bitxor);
+ JITStubCall stubCall(this, cti_op_bitxor);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -1089,7 +2875,7 @@ void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEnt
void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_bitor);
+ JITStubCall stubCall(this, cti_op_bitor);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -1098,26 +2884,31 @@ void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntr
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_eq);
+ JITStubCall stubCall(this, cti_op_eq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ stubCall.call();
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_neq);
+ JITStubCall stubCall(this, cti_op_eq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ stubCall.call();
+ xor32(Imm32(0x1), regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
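Worth noting in the eq/neq slow cases above: op_neq no longer has its own stub; both paths call cti_op_eq, and op_neq simply flips the low bit of the stub's 0/1 result before tagging it as a boolean immediate. A minimal sketch of that shape, with an assumed stand-in for the boolean tag (not JSC's actual encoding):

    #include <cassert>
    #include <cstdint>

    // kBoolTagSketch stands in for JSImmediate's boolean tag; only the
    // xor-then-tag structure mirrors the emitted code above.
    constexpr int32_t kBoolTagSketch = 0x4;

    int32_t tagAsBool(int32_t zeroOrOne) { return zeroOrOne | kBoolTagSketch; }

    int main() {
        int32_t eq = 1;          // cti_op_eq reported "equal"
        int32_t neq = eq ^ 0x1;  // op_neq inverts before tagging
        assert(tagAsBool(neq) == kBoolTagSketch);      // tagged false
        assert(tagAsBool(eq) == (kBoolTagSketch | 1)); // tagged true
        return 0;
    }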
void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_stricteq);
+ JITStubCall stubCall(this, cti_op_stricteq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -1127,7 +2918,7 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_nstricteq);
+ JITStubCall stubCall(this, cti_op_nstricteq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -1140,7 +2931,7 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_instanceof);
+ JITStubCall stubCall(this, cti_op_instanceof);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
stubCall.addArgument(currentInstruction[4].u.operand, regT2);
@@ -1172,11 +2963,12 @@ void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCa
linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_to_jsnumber);
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
+#endif // USE(JSVALUE32_64)
} // namespace JSC
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index 3a6f9b3..9dba2e2 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,6 +34,8 @@
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
@@ -45,11 +47,920 @@ using namespace std;
namespace JSC {
+#if USE(JSVALUE32_64)
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(base);
+ stubCall.addArgument(Imm32(property));
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_getter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_setter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.call(dst);
+}
+
+
+#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.call(dst);
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(value);
+ stubCall.call();
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be relinked to load the function without doing a load.
+ DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+ move(Imm32(JSValue::CellTag), regT1);
+ Jump match = jump();
+
+ ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
+ ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
+ ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+ // Do a regular(ish) get_by_id (the slow case will be linked to
+ // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath();
+
+ match.link(this);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
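Restated as plain C++ (field and function names here are illustrative, not JSC's): the patchable method check compares the receiver's Structure and its prototype's Structure against two cached pointers, and on a double hit materializes the cached function with no property lookup at all.

    struct MethodCheckCacheSketch {
        const void* structure;      // patched over structureToCompare
        const void* protoStructure; // patched over protoStructureToCompare
        void* function;             // patched over putFunction
    };

    // Returns the cached callee on a hit, or null to fall through to the
    // regular get_by_id path (the failure cases linked above).
    inline void* methodCheck(const MethodCheckCacheSketch& cache,
                             const void* structure, const void* protoStructure)
    {
        if (cache.structure == structure && cache.protoStructure == protoStructure)
            return cache.function;
        return nullptr;
    }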
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+}
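The paired load32 calls above (offset and offset + 4) reflect the JSVALUE32_64 encoding: each slot is a 64-bit cell holding a 32-bit payload in the low word and a 32-bit tag in the high word on a little-endian target. A sketch of the layout, with illustrative names:

    #include <cstdint>

    struct EncodedSlotSketch {
        int32_t payload; // read at OBJECT_OFFSETOF(ArrayStorage, m_vector[0])
        int32_t tag;     // read at the same offset + 4
    };

    static_assert(sizeof(EncodedSlotSketch) == 8,
                  "one slot per 8 bytes, hence the TimesEight scale above");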
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ // The slow case that handles accesses to arrays (below) may jump back up to here.
+ Label callGetByValJITStub(this);
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+
+ linkSlowCase(iter); // array fast cut-off check
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
+ branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);
+
+ // Missed the fast region, but it is still in the vector.
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+
+ // FIXME: Maybe we can optimize this comparison to JSValue().
+ Jump skip = branch32(NotEqual, regT0, Imm32(0));
+ branch32(Equal, regT1, Imm32(JSValue::CellTag), callGetByValJITStub);
+
+ skip.link(this);
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+
+ Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
+
+ // Check if the access is within the vector.
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
+
+ // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
+ // FIXME: should be able to handle an initial write to the array; increment the number of items in the array, and potentially update the fast access cutoff.
+ Jump skip = branch32(NotEqual, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::CellTag));
+ addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), Imm32(0)));
+ skip.link(this);
+
+ inFastVector.link(this);
+
+ emitLoad(value, regT1, regT0);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+}
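The two branches guarding the slow vector write above amount to a hole test: a slot that has never been written decodes as a cell tag with a null payload (the empty JSValue), and such first writes are punted to the stub so the item count and fast-access cutoff can be updated. As a predicate, with an assumed tag constant:

    #include <cstdint>

    // kCellTagSketch stands in for JSValue::CellTag; its value here is an
    // assumption - only the (tag, payload) == (cell, 0) test is the point.
    constexpr int32_t kCellTagSketch = -2;

    inline bool isHole(int32_t tag, int32_t payload)
    {
        return tag == kCellTagSketch && payload == 0;
    }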
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(base);
+ stubPutByValCall.addArgument(property);
+ stubPutByValCall.addArgument(value);
+ stubPutByValCall.call();
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+
+ // Slow cases for immediate int accesses to arrays.
+ linkSlowCase(iter); // in vector check
+ linkSlowCase(iter); // written to slot check
+
+ JITStubCall stubCall(this, cti_op_put_by_val_array);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(regT2);
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ compileGetByIdHotPath();
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+}
+
+void JIT::compileGetByIdHotPath()
+{
+ // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+ // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+ // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
+ // as a label to jump back to if one of these trampolines finds a match (see the sketch after this function).
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
+ ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
+
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
+
+ DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
+ DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
+
+ Label putResult(this);
+ ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+}
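The differenceBetween ASSERTs above pin down the inline cache's contract: every patchable field sits at a compile-time-constant byte offset from hotPathBegin, so later repatching can recover each field's address from the single label the JIT stores. Roughly, in C++ terms (names illustrative):

    #include <cstddef>
    #include <cstdint>

    struct HotPathSiteSketch {
        uint8_t* hotPathBegin; // the only pointer the JIT needs to remember
    };

    // A patchable field is found by pure offset arithmetic; this is why the
    // generated code layout must never drift from the asserted offsets.
    inline uint8_t* fieldAddress(const HotPathSiteSketch& site, ptrdiff_t patchOffset)
    {
        return site.hotPathBegin + patchOffset;
    }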
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+}
+
+void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
+ // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+ // of the call (which we can use to look up the patch information), but should an array-length or
+ // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+ // the distance from the call to the head of the slow case.
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ Label coldPathBegin(this);
+
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ Call call = stubCall.call(dst);
+
+ ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ // In order to be able to patch both the Structure, and the object offset, we store one pointer,
+ // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
+ // such that the Structure & offset are always at the same distance from this.
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, value, regT3, regT2);
+
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+
+ // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+
+ DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
+ DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_put_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(regT3, regT2);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ emitStore(offset, valueTag, valuePayload, base);
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ emitLoad(offset, resultTag, resultPayload, base);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+{
+ if (base->isUsingInlineStorage()) {
+ load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
+ load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
+ return;
+ }
+
+ size_t offset = cachedOffset * sizeof(JSValue);
+
+ PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
+ loadPtr(static_cast<void*>(protoPropertyStorage), temp);
+ load32(Address(temp, offset), resultPayload);
+ load32(Address(temp, offset + 4), resultTag);
+}
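All three direct-offset helpers above reduce to one addressing rule: a property lives either inline in the object itself or behind a single indirection through m_externalStorage. A sketch of that rule, assuming an 8-byte slot (payload + tag) and illustrative field names:

    #include <cstddef>
    #include <cstdint>

    struct ObjectSketch {
        uint64_t inlineStorage[4]; // stands in for JSObject::m_inlineStorage
        uint64_t* externalStorage; // stands in for JSObject::m_externalStorage
        bool usingInlineStorage;
    };

    inline uint64_t* propertySlot(ObjectSketch* o, size_t cachedOffset)
    {
        // Inline: the slot is inside the object; external: one extra load.
        return o->usingInlineStorage ? &o->inlineStorage[cachedOffset]
                                     : &o->externalStorage[cachedOffset];
    }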
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+{
+ // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
+
+ JumpList failureCases;
+ failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(oldStructure)));
+
+ // Verify that nothing in the prototype chain has a setter for this property.
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(it->get())));
+ }
+
+ // Reallocate property storage if needed.
+ Call callTarget;
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+ // This trampoline was called like a JIT stub; before we can call again we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.call(regT0);
+
+ restoreReturnAddressBeforeReturn(regT3);
+ }
+
+ sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+ add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+ storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+
+ load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
+ load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
+
+ // Write the value
+ compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+}
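Stripped of the codegen, the transition stub commits three things in order: adjust the two Structures' refcounts, install the new Structure on the cell, then store the value at its cached slot. A hedged outline with simplified types:

    struct StructureSketch { unsigned refCount; };
    struct CellSketch { StructureSketch* structure; };

    inline void commitTransitionSketch(CellSketch* cell,
                                       StructureSketch* oldStructure,
                                       StructureSketch* newStructure)
    {
        --oldStructure->refCount;       // sub32(..., oldStructure->addressOfCount())
        ++newStructure->refCount;       // add32(..., newStructure->addressOfCount())
        cell->structure = newStructure; // storePtr(ImmPtr(newStructure), ...)
        // ...after which compilePutDirectOffset writes the value at cachedOffset.
    }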
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct.
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
+}
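The two 32-bit repatches above are plain slot arithmetic: under JSVALUE32_64 a JSValue is 8 bytes, so cached slot i keeps its payload at byte 8*i and its tag at byte 8*i + 4. For instance:

    #include <cstddef>

    constexpr size_t payloadOffset(size_t cachedOffset) { return 8 * cachedOffset; }
    constexpr size_t tagOffset(size_t cachedOffset) { return 8 * cachedOffset + 4; }

    static_assert(tagOffset(3) == 28, "slot 3: payload at byte 24, tag at byte 28");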
+
+void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ ASSERT(!methodCallLinkInfo.cachedStructure);
+ methodCallLinkInfo.cachedStructure = structure;
+ structure->ref();
+
+ Structure* prototypeStructure = proto->structure();
+ ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
+ methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
+ prototypeStructure->ref();
+
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct.
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // regT0 holds a JSCell*
+
+ // Check for array
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+
+ Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
+ move(regT2, regT0);
+ move(Imm32(JSValue::Int32Tag), regT1);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
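The Above/INT_MAX branch exists because this stub hands the length back as an Int32-tagged immediate: JSArray lengths are unsigned 32-bit, but only values that also fit in a signed int32 can take that representation, so anything larger bails to the slow case. As a predicate:

    #include <climits>
    #include <cstdint>

    inline bool lengthFitsInt32(uint32_t length)
    {
        return length <= static_cast<uint32_t>(INT_MAX);
    }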
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check that the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ // Checks out okay! - getDirectOffset
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+{
+ // regT0 holds a JSCell*
+
+ Jump failureCase = checkStructure(regT0, structure);
+ compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+ // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ polymorphicStructures->list[currentIndex].set(entryLabel, structure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
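The lastProtoBegin bookkeeping gives the polymorphic list its runtime shape: each newly generated stub's failure path jumps to the previously generated stub, and the oldest falls back to the generic slow case, so structures are tried newest-first. Modeled as a list walk with illustrative types:

    struct PolymorphicStubSketch {
        const void* structure;             // the Structure this stub accepts
        const PolymorphicStubSketch* next; // where its failure path jumps
    };

    inline const PolymorphicStubSketch* dispatch(const PolymorphicStubSketch* newest,
                                                 const void* structure)
    {
        for (const PolymorphicStubSketch* s = newest; s; s = s->next)
            if (s->structure == structure)
                return s;
        return nullptr; // chain exhausted: the generic slow case handles it
    }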
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ // Check that regT0 holds an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check that the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+ // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ prototypeStructure->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+ // Check that regT0 holds an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* chainEntries = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = chainEntries[i].get();
+
+ // Check that the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+ }
+ ASSERT(protoObject);
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+ // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ // Track the stub we have created so that it will be deleted later.
+ structure->ref();
+ chain->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+ // Check that regT0 holds an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* chainEntries = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = chainEntries[i].get();
+
+ // Check that the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+ }
+ ASSERT(protoObject);
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+ // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
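Stated as plain logic, the chain stubs above verify every Structure along the prototype chain before trusting the cached offset: one check on the receiver, one per prototype, then a direct read from the final prototype's storage. A sketch under illustrative types:

    #include <cstddef>

    struct ChainLinkSketch { const void* expectedStructure; };

    // True when the receiver and every prototype still carry the Structures
    // observed at compile time (i.e. bucketsOfFail stays empty).
    inline bool chainStillValid(const void* receiverStructure,
                                const void* expectedReceiverStructure,
                                const void* const* protoStructures,
                                const ChainLinkSketch* links, size_t count)
    {
        if (receiverStructure != expectedReceiverStructure)
            return false;
        for (size_t i = 0; i < count; ++i) {
            if (protoStructures[i] != links[i].expectedStructure)
                return false;
        }
        return true;
    }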
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+#else // USE(JSVALUE32_64)
+
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
// This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
// We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
// number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
@@ -64,11 +975,11 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
// This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
- loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
// Get the value from the vector
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0);
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -76,7 +987,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
// See comment in op_get_by_val.
zeroExtend32ToPtr(regT1, regT1);
#else
@@ -86,24 +997,24 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
// This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
- loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
- Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
// No; oh well, check if the access is within the vector - if so, we may still be okay.
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength))));
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
// This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
// FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
- addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));
+ addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
// All good - put the value into the array.
inFastVector.link(this);
emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
- storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
+ storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
}
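
The put path above makes a three-way decision: indices below m_fastAccessCutoff are written straight into the vector, indices inside m_vectorLength are written inline only if the slot is already populated, and everything else drops to the slow case. A hedged sketch of that control flow, with simplified stand-ins for the engine's types:

    #include <cstddef>

    struct SketchArrayStorage {
        size_t vectorLength;   // capacity of the allocated vector
        void** vector;         // slots; null means a hole (never written)
    };

    enum class PutByValPath { FastVector, InlineWrite, SlowCase };

    // Mirrors the emitted branches in emit_op_put_by_val.
    PutByValPath classifyPut(size_t index, size_t fastAccessCutoff,
                             const SketchArrayStorage& storage)
    {
        if (index < fastAccessCutoff)
            return PutByValPath::FastVector;   // the unconditionally fast case
        if (index >= storage.vectorLength)
            return PutByValPath::SlowCase;     // outside the vector entirely
        if (!storage.vector[index])
            return PutByValPath::SlowCase;     // first write to this hole
        return PutByValPath::InlineWrite;      // populated slot: write in place
    }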
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
+ JITStubCall stubCall(this, cti_op_put_by_index);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -112,7 +1023,7 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
+ JITStubCall stubCall(this, cti_op_put_getter);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -121,7 +1032,7 @@ void JIT::emit_op_put_getter(Instruction* currentInstruction)
void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
+ JITStubCall stubCall(this, cti_op_put_setter);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -130,7 +1041,7 @@ void JIT::emit_op_put_setter(Instruction* currentInstruction)
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
+ JITStubCall stubCall(this, cti_op_del_by_id);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
@@ -155,7 +1066,7 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitGetVirtualRegister(baseVReg, regT0);
- JITStubCall stubCall(this, JITStubs::cti_op_get_by_id_generic);
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
stubCall.call(resultVReg);
@@ -176,7 +1087,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_generic);
+ JITStubCall stubCall(this, cti_op_put_by_id_generic);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(regT1);
@@ -212,9 +1123,9 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
Jump notCell = emitJumpIfNotJSCell(regT0);
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
- Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, FIELD_OFFSET(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
// This will be relinked to load the function without doing a load.
DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
@@ -247,7 +1158,7 @@ void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowC
unsigned baseVReg = currentInstruction[2].u.operand;
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
- compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, true);
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
// We've already generated the following get_by_id, so make sure it's skipped over.
m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
@@ -285,12 +1196,12 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0);
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
Label externalLoadComplete(this);
ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
@@ -308,10 +1219,10 @@ void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCase
unsigned baseVReg = currentInstruction[2].u.operand;
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
- compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, false);
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}
-void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck)
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
// As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
// so that we only need to track one pointer into the slow case code - we track a pointer to the location
@@ -325,7 +1236,7 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
#ifndef NDEBUG
Label coldPathBegin(this);
#endif
- JITStubCall stubCall(this, isMethodCheck ? JITStubs::cti_op_get_by_id_method_check : JITStubs::cti_op_get_by_id);
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
Call call = stubCall.call(resultVReg);
@@ -333,7 +1244,8 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
// Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
@@ -357,11 +1269,11 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
// Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0);
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
Label externalLoadComplete(this);
ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
@@ -380,7 +1292,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_id);
+ JITStubCall stubCall(this, cti_op_put_by_id);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(regT1);
@@ -396,9 +1308,9 @@ void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* s
{
int offset = cachedOffset * sizeof(JSValue);
if (structure->isUsingInlineStorage())
- offset += FIELD_OFFSET(JSObject, m_inlineStorage);
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
else
- loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base);
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
storePtr(value, Address(base, offset));
}
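
compilePutDirectOffset and compileGetDirectOffset both reduce to one addressing decision: inline properties sit at a fixed offset inside the object cell, while out-of-line properties cost one extra load to fetch the external storage pointer first. The equivalent interpreter-style logic, as a sketch (SketchObject is an illustrative layout, not JSC's):

    #include <cstddef>

    struct SketchValue { void* bits; };

    struct SketchObject {
        static const size_t inlineCapacity = 4;
        SketchValue inlineStorage[inlineCapacity]; // m_inlineStorage analogue
        SketchValue* externalStorage;              // m_externalStorage analogue
        bool usingInlineStorage;
    };

    // One array access for inline storage; a pointer chase first otherwise.
    SketchValue getDirect(const SketchObject& object, size_t cachedOffset)
    {
        if (object.usingInlineStorage)
            return object.inlineStorage[cachedOffset];
        return object.externalStorage[cachedOffset];
    }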
@@ -407,34 +1319,37 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure*
{
int offset = cachedOffset * sizeof(JSValue);
if (structure->isUsingInlineStorage())
- offset += FIELD_OFFSET(JSObject, m_inlineStorage);
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
else
- loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base);
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
loadPtr(Address(base, offset), result);
}
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
if (base->isUsingInlineStorage())
loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
- else
- loadPtr(static_cast<void*>(&base->m_externalStorage[cachedOffset]), result);
+ else {
+ PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
+ loadPtr(static_cast<void*>(protoPropertyStorage), temp);
+ loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
+ }
}
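
Note why the new overload takes a scratch register: instead of baking the prototype's current storage address into the stub, it loads through &base->m_externalStorage at runtime, so the stub keeps working if the property storage is later reallocated. A sketch of the two behaviors (names illustrative):

    #include <cstddef>

    struct SketchValue { void* bits; };

    struct SketchProto {
        SketchValue* externalStorage; // may be reallocated at any time
    };

    // Old behavior: the storage pointer's value at compile time is frozen
    // into the stub, so a later reallocation leaves it dangling.
    SketchValue loadBaked(SketchValue* storageAtCompileTime, size_t offset)
    {
        return storageAtCompileTime[offset];
    }

    // New behavior: two loads - fetch the current storage pointer out of the
    // object, then index into it - so a reallocation is always observed.
    SketchValue loadIndirect(const SketchProto* proto, size_t offset)
    {
        return proto->externalStorage[offset];
    }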
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
JumpList failureCases;
// Check eax is an object of the right Structure.
failureCases.append(emitJumpIfNotJSCell(regT0));
- failureCases.append(branchPtr(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
JumpList successCases;
// ecx = baseObject
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
// proto(ecx) = baseObject->structure()->prototype()
- failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+ failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
- loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
// ecx = baseObject->m_structure
for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
@@ -442,11 +1357,11 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));
// Check the structure id
- failureCases.append(branchPtr(NotEqual, Address(regT2, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));
+ failureCases.append(branchPtr(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(it->get())));
- loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
- failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
- loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
}
successCases.link(this);
@@ -458,15 +1373,16 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
if (willNeedStorageRealloc) {
// This trampoline was called like a JIT stub; before we can call again we need to
// remove the return address from the stack, to prevent the stack from becoming misaligned.
- preverveReturnAddressAfterCall(regT3);
+ preserveReturnAddressAfterCall(regT3);
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_transition_realloc);
- stubCall.addArgument(regT0);
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
- stubCall.addArgument(regT1); // This argument is not used in the stub; we set it up on the stack so that it can be restored, below.
stubCall.call(regT0);
- emitGetJITStubArg(4, regT1);
+ emitGetJITStubArg(3, regT1);
restoreReturnAddressBeforeReturn(regT3);
}
@@ -475,7 +1391,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// codeblock should ensure oldStructure->m_refCount > 0
sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(regT0, FIELD_OFFSET(JSCell, m_structure)));
+ storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
// write the value
compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
@@ -487,88 +1403,99 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
- patchBuffer.link(failureCall, FunctionPtr(JITStubs::cti_op_put_by_id_fail));
+ patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
if (willNeedStorageRealloc) {
ASSERT(m_calls.size() == 1);
- patchBuffer.link(m_calls[0].from, FunctionPtr(JITStubs::cti_op_put_by_id_transition_realloc));
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
}
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
stubInfo->stubRoutine = entryLabel;
- returnAddress.relinkCallerToTrampoline(entryLabel);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
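
The chain walk above compiles one structure check per prototype, succeeding early if the chain terminates in null and failing if any link has transitioned since the stub was compiled. The same validation in plain C++, as a sketch with stand-in types:

    #include <cstddef>

    struct SketchStructure;

    struct SketchCell {
        const SketchStructure* structure;
        const SketchCell* prototype; // null terminates the chain
    };

    // Returns true only if every prototype still carries the structure that
    // was recorded in the StructureChain when the stub was compiled.
    bool chainStillValid(const SketchCell* object,
                         const SketchStructure* const* expected, size_t count)
    {
        const SketchCell* current = object->prototype;
        for (size_t i = 0; i < count; ++i, current = current->prototype) {
            if (!current)
                return true;  // hit null: nothing further can invalidate us
            if (current->structure != expected[i])
                return false; // a prototype transitioned; take the slow path
        }
        return true;
    }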
-void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
+ RepatchBuffer repatchBuffer(codeBlock);
+
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_self_fail));
+ // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
int offset = sizeof(JSValue) * cachedOffset;
// If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
// and makes the subsequent load's offset automatically correct
if (structure->isUsingInlineStorage())
- stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad).repatchLoadPtrToLEA();
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
// Patch the offset into the property map to load from, then patch the Structure to look for.
- stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure).repatch(structure);
- stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(offset);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}
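
patchGetByIdSelf rewrites three patch sites in already-emitted code: the call target (so the next miss routes to the self_fail stub), the structure immediate, and the 32-bit property-map offset. Modeling the patch sites as plain fields gives this sketch (a simplification; the real repatching writes into machine code):

    struct SketchGetByIdPatchSite {
        const void* expectedStructure; // compared on every hot-path execution
        int propertyOffset;            // byte offset into property storage
        const void* missHandler;       // routine called when the check fails
    };

    // Three stores, in the same order as patchGetByIdSelf: reroute the miss
    // handler first so the same site is never patched twice, then fix up the
    // structure and the scaled offset.
    void patchSelfAccess(SketchGetByIdPatchSite& site, const void* structure,
                         int cachedOffset, int valueSize, const void* failStub)
    {
        site.missHandler = failStub;
        site.expectedStructure = structure;
        site.propertyOffset = cachedOffset * valueSize;
    }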
-void JIT::patchMethodCallProto(MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
+void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
+ RepatchBuffer repatchBuffer(codeBlock);
+
ASSERT(!methodCallLinkInfo.cachedStructure);
methodCallLinkInfo.cachedStructure = structure;
structure->ref();
- methodCallLinkInfo.structureLabel.repatch(structure);
- methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj).repatch(proto);
- methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct).repatch(proto->structure());
- methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction).repatch(callee);
+ Structure* prototypeStructure = proto->structure();
+ ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
+ methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
+ prototypeStructure->ref();
+
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}
-void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
+ RepatchBuffer repatchBuffer(codeBlock);
+
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
int offset = sizeof(JSValue) * cachedOffset;
// If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
// and makes the subsequent load's offset automatically correct
if (structure->isUsingInlineStorage())
- stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad).repatchLoadPtrToLEA();
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
// Patch the offset into the property map to load from, then patch the Structure to look for.
- stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure).repatch(structure);
- stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(offset);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}
-void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress)
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress.addressForLookup());
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
// Check eax is an array
Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
// Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
- load32(Address(regT2, FIELD_OFFSET(ArrayStorage, m_length)), regT2);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
- PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -584,14 +1511,15 @@ void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- jumpLocation.relink(entryLabel);
-}
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
-{
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
@@ -609,11 +1537,11 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
#endif
// Checks out okay! - getDirectOffset
- compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -629,7 +1557,11 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- jumpLocation.relink(entryLabel);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
@@ -638,7 +1570,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
Jump success = jump();
- PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
@@ -657,7 +1589,8 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- jumpLocation.relink(entryLabel);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
@@ -679,11 +1612,11 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
#endif
// Checks out okay! - getDirectOffset
- compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
@@ -701,7 +1634,8 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- jumpLocation.relink(entryLabel);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
@@ -732,10 +1666,10 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
}
ASSERT(protoObject);
- compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
@@ -754,14 +1688,12 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- jumpLocation.relink(entryLabel);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
}
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
-
ASSERT(count);
JumpList bucketsOfFail;
@@ -787,10 +1719,10 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
}
ASSERT(protoObject);
- compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
@@ -804,13 +1736,19 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- jumpLocation.relink(entryLabel);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+#endif // USE(JSVALUE32_64)
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITStubCall.h b/JavaScriptCore/jit/JITStubCall.h
index 6c9ccc1..cb5354b 100644
--- a/JavaScriptCore/jit/JITStubCall.h
+++ b/JavaScriptCore/jit/JITStubCall.h
@@ -37,32 +37,40 @@ namespace JSC {
JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
- , m_returnType(Value)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_returnType(Cell)
+ , m_stackIndex(stackIndexStart)
{
}
JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
- , m_returnType(Value)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_returnType(Cell)
+ , m_stackIndex(stackIndexStart)
{
}
JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
- , m_returnType(Value)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_returnType(VoidPtr)
+ , m_stackIndex(stackIndexStart)
{
}
JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
- , m_returnType(Value)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_returnType(Int)
+ , m_stackIndex(stackIndexStart)
+ {
+ }
+
+ JITStubCall(JIT* jit, bool (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Int)
+ , m_stackIndex(stackIndexStart)
{
}
@@ -70,30 +78,78 @@ namespace JSC {
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
, m_returnType(Void)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_stackIndex(stackIndexStart)
{
}
+#if USE(JSVALUE32_64)
+ JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_stackIndex(stackIndexStart)
+ {
+ }
+#endif
+
// Arguments are added first to last.
+ void skipArgument()
+ {
+ m_stackIndex += stackIndexStep;
+ }
+
void addArgument(JIT::Imm32 argument)
{
- m_jit->poke(argument, m_argumentIndex);
- ++m_argumentIndex;
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
}
void addArgument(JIT::ImmPtr argument)
{
- m_jit->poke(argument, m_argumentIndex);
- ++m_argumentIndex;
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
}
void addArgument(JIT::RegisterID argument)
{
- m_jit->poke(argument, m_argumentIndex);
- ++m_argumentIndex;
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(const JSValue& value)
+ {
+ m_jit->poke(JIT::Imm32(value.payload()), m_stackIndex);
+ m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
+ {
+ m_jit->poke(payload, m_stackIndex);
+ m_jit->poke(tag, m_stackIndex + 1);
+ m_stackIndex += stackIndexStep;
}
+#if USE(JSVALUE32_64)
+ void addArgument(unsigned srcVirtualRegister)
+ {
+ if (m_jit->m_codeBlock->isConstantRegisterIndex(srcVirtualRegister)) {
+ addArgument(m_jit->getConstantOperand(srcVirtualRegister));
+ return;
+ }
+
+ m_jit->emitLoad(srcVirtualRegister, JIT::regT1, JIT::regT0);
+ addArgument(JIT::regT1, JIT::regT0);
+ }
+
+ void getArgument(size_t argumentNumber, JIT::RegisterID tag, JIT::RegisterID payload)
+ {
+ size_t stackIndex = stackIndexStart + (argumentNumber * stackIndexStep);
+ m_jit->peek(payload, stackIndex);
+ m_jit->peek(tag, stackIndex + 1);
+ }
+#else
void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
{
if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
@@ -104,13 +160,13 @@ namespace JSC {
}
m_jit->killLastResultRegister();
}
+#endif
JIT::Call call()
{
- ASSERT(m_jit->m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
#if ENABLE(OPCODE_SAMPLING)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, true);
+ if (m_jit->m_bytecodeIndex != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, true);
#endif
m_jit->restoreArgumentReference();
@@ -118,24 +174,46 @@ namespace JSC {
m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeIndex, m_stub));
#if ENABLE(OPCODE_SAMPLING)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, false);
+ if (m_jit->m_bytecodeIndex != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, false);
#endif
+#if USE(JSVALUE32_64)
+ m_jit->unmap();
+#else
m_jit->killLastResultRegister();
+#endif
return call;
}
+#if USE(JSVALUE32_64)
+ JIT::Call call(unsigned dst) // dst is a virtual register.
+ {
+ ASSERT(m_returnType == Value || m_returnType == Cell);
+ JIT::Call call = this->call();
+ if (m_returnType == Value)
+ m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
+ else
+ m_jit->emitStoreCell(dst, JIT::returnValueRegister);
+ return call;
+ }
+#else
JIT::Call call(unsigned dst) // dst is a virtual register.
{
- ASSERT(m_returnType == Value);
+ ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
JIT::Call call = this->call();
m_jit->emitPutVirtualRegister(dst);
return call;
}
+#endif
- JIT::Call call(JIT::RegisterID dst)
+ JIT::Call call(JIT::RegisterID dst) // dst is a machine register.
{
- ASSERT(m_returnType == Value);
+#if USE(JSVALUE32_64)
+ ASSERT(m_returnType == Value || m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
+#else
+ ASSERT(m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
+#endif
JIT::Call call = this->call();
if (dst != JIT::returnValueRegister)
m_jit->move(JIT::returnValueRegister, dst);
@@ -143,25 +221,13 @@ namespace JSC {
}
private:
+ static const size_t stackIndexStep = sizeof(EncodedJSValue) == 2 * sizeof(void*) ? 2 : 1;
+ static const size_t stackIndexStart = 1; // Index 0 is reserved for restoreArgumentReference().
+
JIT* m_jit;
void* m_stub;
- enum { Value, Void } m_returnType;
- size_t m_argumentIndex;
- };
-
- class CallEvalJITStub : public JITStubCall {
- public:
- CallEvalJITStub(JIT* jit, Instruction* instruction)
- : JITStubCall(jit, JITStubs::cti_op_call_eval)
- {
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- addArgument(callee, JIT::regT2);
- addArgument(JIT::Imm32(registerOffset));
- addArgument(JIT::Imm32(argCount));
- }
+ enum { Void, VoidPtr, Int, Value, Cell } m_returnType;
+ size_t m_stackIndex;
};
}
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index a40d1ba..d563f58 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -56,28 +56,31 @@
#include "RegExpPrototype.h"
#include "Register.h"
#include "SamplingTool.h"
+#include <stdarg.h>
#include <stdio.h>
using namespace std;
namespace JSC {
-
#if PLATFORM(DARWIN) || PLATFORM(WIN_OS)
#define SYMBOL_STRING(name) "_" #name
#else
#define SYMBOL_STRING(name) #name
#endif
+#if USE(JSVALUE32_64)
+
#if COMPILER(GCC) && PLATFORM(X86)
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
-asm(
+asm volatile (
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
"pushl %ebp" "\n"
@@ -85,11 +88,11 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"pushl %esi" "\n"
"pushl %edi" "\n"
"pushl %ebx" "\n"
- "subl $0x1c, %esp" "\n"
+ "subl $0x3c, %esp" "\n"
"movl $512, %esi" "\n"
- "movl 0x38(%esp), %edi" "\n"
- "call *0x30(%esp)" "\n"
- "addl $0x1c, %esp" "\n"
+ "movl 0x58(%esp), %edi" "\n"
+ "call *0x50(%esp)" "\n"
+ "addl $0x3c, %esp" "\n"
"popl %ebx" "\n"
"popl %edi" "\n"
"popl %esi" "\n"
@@ -97,16 +100,25 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
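
The jump from 0x1c to 0x3c of reserved stub-argument space is not arbitrary; it is what makes the first incoming argument land on a 16-byte boundary, as the new COMPILE_ASSERTs insist. The arithmetic, checked as a sketch:

    #include <cassert>

    int main()
    {
        const unsigned returnAddress = 4;        // pushed by the caller's call
        const unsigned savedRegisters = 4 * 4;   // ebp, esi, edi, ebx
        const unsigned stubArgumentSpace = 0x3c; // was 0x1c before this patch
        const unsigned codeOffset =
            stubArgumentSpace + savedRegisters + returnAddress;
        assert(codeOffset == 0x50);   // matches JITStackFrame_code_offset
        assert(codeOffset % 16 == 0); // the 16-byte alignment assert holds
        return 0;
    }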
-asm(
+asm volatile (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
#endif
"call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "addl $0x3c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addl $0x1c, %esp" "\n"
+ "addl $0x3c, %esp" "\n"
"popl %ebx" "\n"
"popl %edi" "\n"
"popl %esi" "\n"
@@ -122,11 +134,12 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 32 == 0x0, JITStackFrame_maintains_32byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-asm(
+asm volatile (
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
"pushq %rbp" "\n"
@@ -152,11 +165,22 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm(
+asm volatile (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"movq %rsp, %rdi" "\n"
"call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"addq $0x48, %rsp" "\n"
@@ -169,24 +193,13 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM_V7)
+#elif COMPILER(GCC) && PLATFORM_ARM_ARCH(7)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
#endif
-COMPILE_ASSERT(offsetof(struct JITStackFrame, preservedReturnAddress) == 0x20, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, preservedR4) == 0x24, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, preservedR5) == 0x28, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, preservedR6) == 0x2c, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
-
-COMPILE_ASSERT(offsetof(struct JITStackFrame, registerFile) == 0x30, JITStackFrame_registerFile_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x34, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, exception) == 0x38, JITStackFrame_exception_offset_matches_ctiTrampoline);
-// The fifth argument is the first item already on the stack.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, enabledProfilerReference) == 0x3c, JITStackFrame_enabledProfilerReference_offset_matches_ctiTrampoline);
-
-asm volatile (
+asm volatile (
".text" "\n"
".align 2" "\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
@@ -252,9 +265,10 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
extern "C" {
@@ -266,12 +280,12 @@ extern "C" {
push esi;
push edi;
push ebx;
- sub esp, 0x1c;
+ sub esp, 0x3c;
mov esi, 512;
mov ecx, esp;
- mov edi, [esp + 0x38];
- call [esp + 0x30];
- add esp, 0x1c;
+ mov edi, [esp + 0x58];
+ call [esp + 0x50];
+ add esp, 0x3c;
pop ebx;
pop edi;
pop esi;
@@ -284,8 +298,8 @@ extern "C" {
{
__asm {
mov ecx, esp;
- call JITStubs::cti_vm_throw;
- add esp, 0x1c;
+ call cti_vm_throw;
+ add esp, 0x3c;
pop ebx;
pop edi;
pop esi;
@@ -297,7 +311,7 @@ extern "C" {
__declspec(naked) void ctiOpThrowNotCaught()
{
__asm {
- add esp, 0x1c;
+ add esp, 0x3c;
pop ebx;
pop edi;
pop esi;
@@ -307,8 +321,311 @@ extern "C" {
}
}
+#endif // COMPILER(GCC) && PLATFORM(X86)
+
+#else // USE(JSVALUE32_64)
+
+#if COMPILER(GCC) && PLATFORM(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushl %ebp" "\n"
+ "movl %esp, %ebp" "\n"
+ "pushl %esi" "\n"
+ "pushl %edi" "\n"
+ "pushl %ebx" "\n"
+ "subl $0x1c, %esp" "\n"
+ "movl $512, %esi" "\n"
+ "movl 0x38(%esp), %edi" "\n"
+ "call *0x30(%esp)" "\n"
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
+ "movl %esp, %ecx" "\n"
+#endif
+ "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM(X86_64)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
#endif
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %r14" "\n"
+ "pushq %r15" "\n"
+ "pushq %rbx" "\n"
+ // Form the JIT stubs area
+ "pushq %r9" "\n"
+ "pushq %r8" "\n"
+ "pushq %rcx" "\n"
+ "pushq %rdx" "\n"
+ "pushq %rsi" "\n"
+ "pushq %rdi" "\n"
+ "subq $0x48, %rsp" "\n"
+ "movq $512, %r12" "\n"
+ "movq $0xFFFF000000000000, %r14" "\n"
+ "movq $0xFFFF000000000002, %r15" "\n"
+ "movq %rdx, %r13" "\n"
+ "call *%rdi" "\n"
+ "addq $0x78, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "addq $0x78, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addq $0x78, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM_ARM_ARCH(7)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
+#endif
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "sub sp, sp, #0x3c" "\n"
+ "str lr, [sp, #0x20]" "\n"
+ "str r4, [sp, #0x24]" "\n"
+ "str r5, [sp, #0x28]" "\n"
+ "str r6, [sp, #0x2c]" "\n"
+ "str r1, [sp, #0x30]" "\n"
+ "str r2, [sp, #0x34]" "\n"
+ "str r3, [sp, #0x38]" "\n"
+ "cpy r5, r2" "\n"
+ "mov r6, #512" "\n"
+ "blx r0" "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "cpy r0, sp" "\n"
+ "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM(ARM)
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "stmdb sp!, {r1-r3}" "\n"
+ "stmdb sp!, {r4-r8, lr}" "\n"
+ "mov r6, pc" "\n"
+ "add r6, r6, #40" "\n"
+ "sub sp, sp, #32" "\n"
+ "ldr r4, [sp, #60]" "\n"
+ "mov r5, #512" "\n"
+ // r0 contains the code
+ "add r8, pc, #4" "\n"
+ "str r8, [sp, #-4]!" "\n"
+ "mov pc, r0" "\n"
+ "add sp, sp, #32" "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+
+ // the return instruction
+ "ldr pc, [sp], #4" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "mov r0, sp" "\n"
+ "mov lr, r6" "\n"
+ "add r8, pc, #4" "\n"
+ "str r8, [sp, #-4]!" "\n"
+ "b " SYMBOL_STRING(cti_vm_throw) "\n"
+
+// Both share the same return sequence
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "add sp, sp, #32" "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+);
+
+#elif COMPILER(MSVC)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
+#endif
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+extern "C" {
+
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*)
+ {
+ __asm {
+ push ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x1c;
+ mov esi, 512;
+ mov ecx, esp;
+ mov edi, [esp + 0x38];
+ call [esp + 0x30];
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiVMThrowTrampoline()
+ {
+ __asm {
+ mov ecx, esp;
+ call cti_vm_throw;
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiOpThrowNotCaught()
+ {
+ __asm {
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+}
+
+#endif // COMPILER(GCC) && PLATFORM(X86)
+
+#endif // USE(JSVALUE32_64)
+
#if ENABLE(OPCODE_SAMPLING)
#define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
#else
@@ -317,12 +634,30 @@ extern "C" {
JITThunks::JITThunks(JSGlobalData* globalData)
{
- JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiArrayLengthTrampoline, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallPreLink, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
+ JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
+
+#if PLATFORM_ARM_ARCH(7)
+ // Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
+ // and the OBJECT_OFFSETOF macro does not appear constant enough for it to be happy with its use in COMPILE_ASSERT
+ // macros.
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == 0x20);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == 0x24);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == 0x28);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == 0x2c);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == 0x30);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == 0x34);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == 0x38);
+ // The fifth argument is the first item already on the stack.
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == 0x3c);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == 0x1C);
+#endif
}
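
These runtime ASSERTs substitute for COMPILE_ASSERTs because, per the comment above, the ARM toolchain rejects offsetof on the non-POD JITStackFrame and does not treat OBJECT_OFFSETOF as constant enough either. A sketch of the usual OBJECT_OFFSETOF-style workaround, under the assumption that it follows the common fake-base idiom (SKETCH_OFFSETOF is illustrative, not the actual macro):

    #include <cstddef>

    // offsetof-equivalent that sidesteps the non-POD diagnostic by doing
    // pointer arithmetic on a fake base address; many compilers fold this to
    // a constant, but not all accept it inside a COMPILE_ASSERT - hence the
    // runtime ASSERT fallback above.
    #define SKETCH_OFFSETOF(type, member) \
        (reinterpret_cast<ptrdiff_t>(&reinterpret_cast<type*>(0x4000)->member) - 0x4000)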
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValue baseValue, const PutPropertySlot& slot)
+NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo)
{
// The interpreter checks for recursion here; I do not believe this can occur in CTI.
@@ -331,7 +666,7 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
// Uncacheable: give up.
if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
@@ -339,23 +674,25 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
Structure* structure = baseCell->structure();
if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
// If baseCell != base, then baseCell must be a proxy for another object.
if (baseCell != slot.base()) {
- ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress);
-
// Cache hit: Specialize instruction and ref Structures.
// Structure transition, cache transition info
if (slot.type() == PutPropertySlot::NewProperty) {
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ if (!prototypeChain->isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+ return;
+ }
stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain);
JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress);
return;
@@ -363,17 +700,17 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
stubInfo->initPutByIdReplace(structure);
- JIT::patchPutByIdReplace(stubInfo, structure, slot.cachedOffset(), returnAddress);
+ JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
}
-NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot)
+NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
{
// FIXME: Write a test that proves we need to check for recursion here just
// like the interpreter does, then add a check for recursion.
// FIXME: Cache property access for immediates.
if (!baseValue.isCell()) {
- ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
@@ -387,13 +724,13 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
        // The tradeoff of compiling a patched inline string length access routine does not seem
// to pay off, so we currently only do this for arrays.
- ctiPatchCallByReturnAddress(returnAddress, globalData->jitStubs.ctiStringLengthTrampoline());
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs.ctiStringLengthTrampoline());
return;
}
// Uncacheable: give up.
if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
@@ -401,22 +738,17 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
Structure* structure = baseCell->structure();
if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
- // In the interpreter the last structure is trapped here; in CTI we use the
- // *_second method to achieve a similar (but not quite the same) effect.
-
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress);
-
// Cache hit: Specialize instruction and ref Structures.
if (slot.slotBase() == baseValue) {
        // Set this up so derefStructures can do its job.
stubInfo->initGetByIdSelf(structure);
- JIT::patchGetByIdSelf(stubInfo, structure, slot.cachedOffset(), returnAddress);
+ JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
return;
}
@@ -438,16 +770,20 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot);
if (!count) {
- stubInfo->opcodeID = op_get_by_id_generic;
+ stubInfo->accessType = access_get_by_id_generic;
return;
}
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ if (!prototypeChain->isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
stubInfo->initGetByIdChain(structure, prototypeChain);
JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, slot.cachedOffset(), returnAddress);
}
-#endif
+#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#define SETUP_VA_LISTL_ARGS va_list vl_args; va_start(vl_args, args)
@@ -473,7 +809,7 @@ struct StackHack {
: stackFrame(stackFrame)
, savedReturnAddress(*stackFrame.returnAddressSlot())
{
- *stackFrame.returnAddressSlot() = reinterpret_cast<void*>(jscGeneratedNativeCode);
+ *stackFrame.returnAddressSlot() = ReturnAddressPtr(FunctionPtr(jscGeneratedNativeCode));
}
ALWAYS_INLINE ~StackHack()
@@ -482,17 +818,17 @@ struct StackHack {
}
JITStackFrame& stackFrame;
- void* savedReturnAddress;
+ ReturnAddressPtr savedReturnAddress;
};
#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
-#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = returnAddress
+#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress)
#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
#else
#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS)
-#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = returnAddress
+#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress)
#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
#endif
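StackHack is an RAII guard: the constructor swaps the stub's return address for the jscGeneratedNativeCode marker for the duration of the stub call, and the destructor (elided by the hunk above) restores whatever STUB_SET_RETURN_ADDRESS last stored. Reduced to its essentials, the pattern looks like this (names hypothetical, not JSC API):

    // Minimal sketch of the RAII return-address swap used by StackHack.
    class ScopedSlotSwap {
    public:
        ScopedSlotSwap(void** slot, void* temporary)
            : m_slot(slot)
            , m_saved(*slot) // remember the real return address
        {
            *m_slot = temporary; // visible while the stub runs
        }
        ~ScopedSlotSwap() { *m_slot = m_saved; } // restored on every exit path

    private:
        void** m_slot;
        void* m_saved;
    };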
@@ -501,14 +837,14 @@ struct StackHack {
// to get the address of the ctiVMThrowTrampoline function. It's also
// good to keep the code size down by leaving as much of the exception
// handling code out of line as possible.
-static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot)
+static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
{
ASSERT(globalData->exception);
globalData->exceptionLocation = exceptionLocation;
- returnAddressSlot = reinterpret_cast<void*>(ctiVMThrowTrampoline);
+ returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
}
-static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot)
+static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
{
globalData->exception = createStackOverflowError(callFrame);
returnToThrowTrampoline(globalData, exceptionLocation, returnAddressSlot);
@@ -524,27 +860,23 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
#define CHECK_FOR_EXCEPTION() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception != JSValue())) \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
VM_THROW_EXCEPTION(); \
} while (0)
#define CHECK_FOR_EXCEPTION_AT_END() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception != JSValue())) \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
VM_THROW_EXCEPTION_AT_END(); \
} while (0)
#define CHECK_FOR_EXCEPTION_VOID() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception != JSValue())) { \
+ if (UNLIKELY(stackFrame.globalData->exception)) { \
VM_THROW_EXCEPTION_AT_END(); \
return; \
} \
} while (0)
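The do { ... } while (0) wrapper is what lets these multi-statement macros behave as a single statement, so they compose safely with unbraced control flow. For example (ok and proceed() are placeholders):

    // Safe: the macro expands to exactly one statement, so the else binds to
    // the if as intended even without braces.
    if (!ok)
        CHECK_FOR_EXCEPTION_VOID();
    else
        proceed();
    // Had the macro been a bare { ... } block, the semicolon after it would
    // terminate the if early and the else would fail to parse.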
-namespace JITStubs {
-
-#if PLATFORM(ARM_V7)
-
-COMPILE_ASSERT(offsetof(struct JITStackFrame, thunkReturnAddress) == 0x1C, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
+#if PLATFORM_ARM_ARCH(7)
#define DEFINE_STUB_FUNCTION(rtype, op) \
extern "C" { \
@@ -568,7 +900,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, thunkReturnAddress) == 0x1C, JITSt
#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
#endif
-DEFINE_STUB_FUNCTION(JSObject*, op_convert_this)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -577,7 +909,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_convert_this)
JSObject* result = v1.toThisObject(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
- return result;
+ return JSValue::encode(result);
}
DEFINE_STUB_FUNCTION(void, op_end)
@@ -617,8 +949,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
}
if (rightIsNumber & leftIsString) {
- RefPtr<UString::Rep> value = v2.isInt32Fast() ?
- concatenate(asString(v1)->value().rep(), v2.getInt32Fast()) :
+ RefPtr<UString::Rep> value = v2.isInt32() ?
+ concatenate(asString(v1)->value().rep(), v2.asInt32()) :
concatenate(asString(v1)->value().rep(), right);
if (UNLIKELY(!value)) {
@@ -672,7 +1004,7 @@ DEFINE_STUB_FUNCTION(void, register_file_check)
// moved the call frame forward.
CallFrame* oldCallFrame = stackFrame.callFrame->callerFrame();
stackFrame.callFrame = oldCallFrame;
- throwStackOverflowError(oldCallFrame, stackFrame.globalData, oldCallFrame->returnPC(), STUB_RETURN_ADDRESS);
+ throwStackOverflowError(oldCallFrame, stackFrame.globalData, ReturnAddressPtr(oldCallFrame->returnPC()), STUB_RETURN_ADDRESS);
}
DEFINE_STUB_FUNCTION(int, op_loop_if_less)
@@ -737,25 +1069,19 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
DEFINE_STUB_FUNCTION(void, op_put_by_id)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
CallFrame* callFrame = stackFrame.callFrame;
Identifier& ident = stackFrame.args[1].identifier();
PutPropertySlot slot;
stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_id_second));
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_second)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo);
- PutPropertySlot slot;
- stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
- JITThunks::tryCachePutByID(stackFrame.callFrame, stackFrame.callFrame->codeBlock(), STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
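The seenOnce()/setSeen() gating introduced here defers caching until a site's second execution, so code that runs only once never pays for stub generation; the same pattern recurs below in cti_op_get_by_id and cti_vm_lazyLinkCall. Reduced to its essentials (a sketch, not JSC API):

    // Two-phase inline-cache warm-up: returns false on the first visit to a
    // call site and true on every subsequent visit.
    struct CacheWarmUp {
        bool seen;
        bool shouldSpecialize()
        {
            bool specialize = seen;
            seen = true;
            return specialize;
        }
    };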
@@ -772,36 +1098,19 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
CHECK_FOR_EXCEPTION_AT_END();
}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_put_by_id_transition_realloc)
+DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
{
STUB_INIT_STACK_FRAME(stackFrame);
JSValue baseValue = stackFrame.args[0].jsValue();
- int32_t oldSize = stackFrame.args[1].int32();
- int32_t newSize = stackFrame.args[2].int32();
+ int32_t oldSize = stackFrame.args[3].int32();
+ int32_t newSize = stackFrame.args[4].int32();
ASSERT(baseValue.isObject());
- asObject(baseValue)->allocatePropertyStorage(oldSize, newSize);
+ JSObject* base = asObject(baseValue);
+ base->allocatePropertyStorage(oldSize, newSize);
- return JSValue::encode(baseValue);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
-
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_second));
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
+ return base;
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
@@ -814,25 +1123,15 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
JSValue result = baseValue.get(callFrame, ident, slot);
+ CHECK_FOR_EXCEPTION();
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_method_check_second));
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_second)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
- CHECK_FOR_EXCEPTION();
+ if (!methodCallLinkInfo.seenOnce()) {
+ methodCallLinkInfo.setSeen();
+ return JSValue::encode(result);
+ }
// If we successfully got something, then the base from which it is being accessed must
// be an object. (Assertion to ensure asObject() call below is safe, which comes after
@@ -863,33 +1162,33 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_second)
// The result fetched should always be the callee!
ASSERT(result == JSValue(callee));
- MethodCallLinkInfo& methodCallLinkInfo = callFrame->codeBlock()->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
// Check to see if the function is on the object's prototype. Patch up the code to optimize.
- if (slot.slotBase() == structure->prototypeForLookup(callFrame))
- JIT::patchMethodCallProto(methodCallLinkInfo, callee, structure, slotBaseObject);
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
+ JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
+
// Check to see if the function is on the object itself.
    // Since we generate the method-check to check both the structure and a prototype-structure (since this
    // is the common case), we have a problem: we need to patch the prototype-structure check to do something
    // useful. We could try to nop it out altogether, but that's a little messy, so let's do something simpler
    // for now: perform the check against a special object on the global object, used only for this purpose.
    // The object is in no way exposed, and as such the check will always pass.
- else if (slot.slotBase() == baseValue)
- JIT::patchMethodCallProto(methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject()->methodCallDummy());
-
- // For now let any other case be cached as a normal get_by_id.
+ if (slot.slotBase() == baseValue) {
+ JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject()->methodCallDummy(), STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
}
// Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to.
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
-
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
return JSValue::encode(result);
}
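Taken together, a method-check site now resolves in one of three ways (paraphrasing the control flow above):

    // 1. Callee found on the prototype: patch both structure checks so future
    //    calls bypass this stub entirely.
    // 2. Callee found on the object itself: patch the prototype-structure
    //    check against the global object's methodCallDummy(), which always
    //    passes.
    // 3. Anything else: revert the site to a plain op_get_by_id and let the
    //    ordinary get_by_id caching take over.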
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_second)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
CallFrame* callFrame = stackFrame.callFrame;
Identifier& ident = stackFrame.args[1].identifier();
@@ -897,7 +1196,12 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_second)
PropertySlot slot(baseValue);
JSValue result = baseValue.get(callFrame, ident, slot);
- JITThunks::tryCacheGetByID(callFrame, callFrame->codeBlock(), STUB_RETURN_ADDRESS, baseValue, ident, slot);
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
@@ -929,9 +1233,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
PolymorphicAccessStructureList* polymorphicStructureList;
int listIndex = 1;
- if (stubInfo->opcodeID == op_get_by_id_self) {
+ if (stubInfo->accessType == access_get_by_id_self) {
ASSERT(!stubInfo->stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(MacroAssembler::CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
+ polymorphicStructureList = new PolymorphicAccessStructureList(CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
stubInfo->initGetByIdSelfList(polymorphicStructureList, 2);
} else {
polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
@@ -942,10 +1246,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, asCell(baseValue)->structure(), slot.cachedOffset());
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
- } else {
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
- }
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ } else
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
return JSValue::encode(result);
}
@@ -954,18 +1257,18 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(Str
PolymorphicAccessStructureList* prototypeStructureList = 0;
listIndex = 1;
- switch (stubInfo->opcodeID) {
- case op_get_by_id_proto:
+ switch (stubInfo->accessType) {
+ case access_get_by_id_proto:
prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure);
- stubInfo->stubRoutine.reset();
+ stubInfo->stubRoutine = CodeLocationLabel();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
- case op_get_by_id_chain:
+ case access_get_by_id_chain:
prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain);
- stubInfo->stubRoutine.reset();
+ stubInfo->stubRoutine = CodeLocationLabel();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
- case op_get_by_id_proto_list:
+ case access_get_by_id_proto_list:
prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
listIndex = stubInfo->u.getByIdProtoList.listSize;
stubInfo->u.getByIdProtoList.listSize++;
@@ -991,7 +1294,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
CHECK_FOR_EXCEPTION();
if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
return JSValue::encode(result);
}
@@ -1003,7 +1306,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
JSObject* slotBaseObject = asObject(slot.slotBase());
if (slot.slotBase() == baseValue)
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
else if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) {
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
@@ -1016,16 +1319,22 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), slot.cachedOffset());
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
} else if (size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot)) {
+ StructureChain* protoChain = structure->prototypeChain(callFrame);
+ if (!protoChain->isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ return JSValue::encode(result);
+ }
+
int listIndex;
PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
- JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, structure->prototypeChain(callFrame), count, slot.cachedOffset());
+ JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, slot.cachedOffset());
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
} else
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
return JSValue::encode(result);
}
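Both branches above cap the polymorphic list at POLYMORPHIC_LIST_CACHE_SIZE entries: once the final slot is consumed, the call site is patched through to the *_full (or generic) stub so that a megamorphic site stops generating new stub code. The policy in isolation (a sketch reusing names from the surrounding code):

    // Applied whenever the last polymorphic slot is filled:
    if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE - 1) {
        // Megamorphic site: stop specializing, dispatch generically from here on.
        ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
    }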
@@ -1078,7 +1387,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
return JSValue::encode(result);
}
-#endif
+#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
{
@@ -1171,6 +1480,7 @@ DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
#endif
JSFunction* function = asFunction(stackFrame.args[0].jsValue());
+ ASSERT(!function->isHostFunction());
FunctionBodyNode* body = function->body();
ScopeChainNode* callDataScopeChain = function->scope().node();
body->jitCode(callDataScopeChain);
@@ -1184,6 +1494,7 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* newCodeBlock = stackFrame.args[3].codeBlock();
+ ASSERT(newCodeBlock->codeType() != NativeCode);
int argCount = stackFrame.args[2].int32();
ASSERT(argCount != newCodeBlock->m_numParameters);
@@ -1223,34 +1534,28 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
RETURN_POINTER_PAIR(newCodeBlock, callFrame);
}
-DEFINE_STUB_FUNCTION(void*, vm_dontLazyLinkCall)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSGlobalData* globalData = stackFrame.globalData;
- JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
-
- ctiPatchNearCallByReturnAddress(stackFrame.args[1].returnAddress(), globalData->jitStubs.ctiVirtualCallLink());
-
- return callee->body()->generatedJITCode().addressForCall().executableAddress();
-}
-
+#if ENABLE(JIT_OPTIMIZE_CALL)
DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
JITCode& jitCode = callee->body()->generatedJITCode();
CodeBlock* codeBlock = 0;
if (!callee->isHostFunction())
codeBlock = &callee->body()->bytecode(callee->scope().node());
-
+ else
+ codeBlock = &callee->body()->generatedBytecode();
CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
- JIT::linkCall(callee, codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32());
+
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
+ else
+ JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32(), stackFrame.globalData);
return jitCode.addressForCall().executableAddress();
}
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
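The net effect of the lazy-link path, once the warm-up gate passes, is that JIT::linkCall patches the machine-code call site so later calls jump straight to the callee and never re-enter this stub. Schematically (a comment sketch, not actual disassembly):

    // Call-site evolution under lazy linking:
    //   1st call: call ctiVirtualCallLink -> cti_vm_lazyLinkCall (marks the site seen)
    //   2nd call: call ctiVirtualCallLink -> cti_vm_lazyLinkCall -> JIT::linkCall
    //   later:    call <callee entry point>  (patched; direct dispatch)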
DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
{
@@ -1278,7 +1583,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
CallFrame* previousCallFrame = stackFrame.callFrame;
CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
- callFrame->init(0, static_cast<Instruction*>(STUB_RETURN_ADDRESS), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0);
+ callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0);
stackFrame.callFrame = callFrame;
Register* argv = stackFrame.callFrame->registers() - RegisterFile::CallFrameHeaderSize - argCount;
@@ -1316,7 +1621,7 @@ DEFINE_STUB_FUNCTION(void, op_create_arguments)
Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame);
stackFrame.callFrame->setCalleeArguments(arguments);
- stackFrame.callFrame[RegisterFile::ArgumentsRegister] = arguments;
+ stackFrame.callFrame[RegisterFile::ArgumentsRegister] = JSValue(arguments);
}
DEFINE_STUB_FUNCTION(void, op_create_arguments_no_params)
@@ -1325,7 +1630,7 @@ DEFINE_STUB_FUNCTION(void, op_create_arguments_no_params)
Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame, Arguments::NoParameters);
stackFrame.callFrame->setCalleeArguments(arguments);
- stackFrame.callFrame[RegisterFile::ArgumentsRegister] = arguments;
+ stackFrame.callFrame[RegisterFile::ArgumentsRegister] = JSValue(arguments);
}
DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
@@ -1477,8 +1782,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
JSValue result;
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSArray(globalData, baseValue)) {
JSArray* jsArray = asArray(baseValue);
if (jsArray->canGetIndex(i))
@@ -1487,11 +1792,11 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
result = jsArray->JSArray::get(callFrame, i);
} else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
            // Fast string accesses cannot throw, so patch this call site through to the string-specialized stub.
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
result = asString(baseValue)->getIndex(stackFrame.globalData, i);
} else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
} else
result = baseValue.get(callFrame, i);
@@ -1516,14 +1821,14 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
JSValue result;
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
result = asString(baseValue)->getIndex(stackFrame.globalData, i);
else {
result = baseValue.get(callFrame, i);
if (!isJSString(globalData, baseValue))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
}
} else {
Identifier property(callFrame, subscript.toString(callFrame));
@@ -1534,7 +1839,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
return JSValue::encode(result);
}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1547,8 +1851,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
JSValue result;
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
@@ -1556,7 +1860,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
result = baseValue.get(callFrame, i);
if (!isJSByteArray(globalData, baseValue))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
} else {
Identifier property(callFrame, subscript.toString(callFrame));
result = baseValue.get(callFrame, property);
@@ -1566,50 +1870,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_func)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- ScopeChainNode* scopeChain = callFrame->scopeChain();
-
- ScopeChainIterator iter = scopeChain->begin();
- ScopeChainIterator end = scopeChain->end();
-
- // FIXME: add scopeDepthIsZero optimization
-
- ASSERT(iter != end);
-
- Identifier& ident = stackFrame.args[0].identifier();
- JSObject* base;
- do {
- base = *iter;
- PropertySlot slot(base);
- if (base->getPropertySlot(callFrame, ident, slot)) {
- // ECMA 11.2.3 says that if we hit an activation the this value should be null.
- // However, section 10.2.3 says that in the case where the value provided
- // by the caller is null, the global object should be used. It also says
- // that the section does not apply to internal functions, but for simplicity
- // of implementation we use the global object anyway here. This guarantees
- // that in host objects you always get a valid object for this.
- // We also handle wrapper substitution for the global object at the same time.
- JSObject* thisObj = base->toThisObject(callFrame);
- JSValue result = slot.getValue(callFrame, ident);
- CHECK_FOR_EXCEPTION_AT_END();
-
- callFrame->registers()[stackFrame.args[1].int32()] = JSValue(thisObj);
- return JSValue::encode(result);
- }
- ++iter;
- } while (iter != end);
-
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
- VM_THROW_EXCEPTION_AT_END();
- return JSValue::encode(JSValue());
-}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1639,8 +1899,8 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
JSValue subscript = stackFrame.args[1].jsValue();
JSValue value = stackFrame.args[2].jsValue();
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSArray(globalData, baseValue)) {
JSArray* jsArray = asArray(baseValue);
if (jsArray->canSetIndex(i))
@@ -1649,10 +1909,10 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
jsArray->JSArray::put(callFrame, i, value);
} else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
JSByteArray* jsByteArray = asByteArray(baseValue);
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- if (value.isInt32Fast()) {
- jsByteArray->setIndex(i, value.getInt32Fast());
+ if (value.isInt32()) {
+ jsByteArray->setIndex(i, value.asInt32());
return;
} else {
double dValue = 0;
@@ -1690,14 +1950,9 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val_array)
if (LIKELY(i >= 0))
asArray(baseValue)->JSArray::put(callFrame, i, value);
else {
- // This should work since we're re-boxing an immediate unboxed in JIT code.
- ASSERT(JSValue::makeInt32Fast(i));
- Identifier property(callFrame, JSValue::makeInt32Fast(i).toString(callFrame));
- // FIXME: can toString throw an exception here?
- if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot;
- baseValue.put(callFrame, property, value, slot);
- }
+ Identifier property(callFrame, UString::from(i));
+ PutPropertySlot slot;
+ baseValue.put(callFrame, property, value, slot);
}
CHECK_FOR_EXCEPTION_AT_END();
@@ -1714,14 +1969,14 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
JSValue subscript = stackFrame.args[1].jsValue();
JSValue value = stackFrame.args[2].jsValue();
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
JSByteArray* jsByteArray = asByteArray(baseValue);
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- if (value.isInt32Fast()) {
- jsByteArray->setIndex(i, value.getInt32Fast());
+ if (value.isInt32()) {
+ jsByteArray->setIndex(i, value.asInt32());
return;
} else {
double dValue = 0;
@@ -1733,7 +1988,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
}
if (!isJSByteArray(globalData, baseValue))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val));
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val));
baseValue.put(callFrame, i, value);
} else {
Identifier property(callFrame, subscript.toString(callFrame));
@@ -1772,6 +2027,7 @@ DEFINE_STUB_FUNCTION(int, op_loop_if_true)
DEFINE_STUB_FUNCTION(int, op_load_varargs)
{
STUB_INIT_STACK_FRAME(stackFrame);
+
CallFrame* callFrame = stackFrame.callFrame;
RegisterFile* registerFile = stackFrame.registerFile;
int argsOffset = stackFrame.args[0].int32();
@@ -1786,7 +2042,7 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
stackFrame.globalData->exception = createStackOverflowError(callFrame);
VM_THROW_EXCEPTION();
}
- int32_t expectedParams = asFunction(callFrame->registers()[RegisterFile::Callee].jsValue())->body()->parameterCount();
+ int32_t expectedParams = callFrame->callee()->body()->parameterCount();
int32_t inplaceArgs = min(providedParams, expectedParams);
Register* inplaceArgsDst = callFrame->registers() + argsOffset;
@@ -1918,7 +2174,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalObject* globalObject = asGlobalObject(stackFrame.args[0].jsValue());
+ JSGlobalObject* globalObject = stackFrame.args[0].globalObject();
Identifier& ident = stackFrame.args[1].identifier();
unsigned globalResolveInfoIndex = stackFrame.args[2].int32();
ASSERT(globalObject->isGlobalObject());
@@ -2042,7 +2298,117 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
return JSValue::encode(number);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_eq)
+#if USE(JSVALUE32_64)
+
+DEFINE_STUB_FUNCTION(int, op_eq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ start:
+ if (src2.isUndefined()) {
+ return src1.isNull() ||
+ (src1.isCell() && asCell(src1)->structure()->typeInfo().masqueradesAsUndefined()) ||
+ src1.isUndefined();
+ }
+
+ if (src2.isNull()) {
+ return src1.isUndefined() ||
+ (src1.isCell() && asCell(src1)->structure()->typeInfo().masqueradesAsUndefined()) ||
+ src1.isNull();
+ }
+
+ if (src1.isInt32()) {
+ if (src2.isDouble())
+ return src1.asInt32() == src2.asDouble();
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return src1.asInt32() == d;
+ }
+
+ if (src1.isDouble()) {
+ if (src2.isInt32())
+ return src1.asDouble() == src2.asInt32();
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return src1.asDouble() == d;
+ }
+
+ if (src1.isTrue()) {
+ if (src2.isFalse())
+ return false;
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return d == 1.0;
+ }
+
+ if (src1.isFalse()) {
+ if (src2.isTrue())
+ return false;
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return d == 0.0;
+ }
+
+ if (src1.isUndefined())
+ return src2.isCell() && asCell(src2)->structure()->typeInfo().masqueradesAsUndefined();
+
+ if (src1.isNull())
+ return src2.isCell() && asCell(src2)->structure()->typeInfo().masqueradesAsUndefined();
+
+ ASSERT(src1.isCell());
+
+ JSCell* cell1 = asCell(src1);
+
+ if (cell1->isString()) {
+ if (src2.isInt32())
+ return static_cast<JSString*>(cell1)->value().toDouble() == src2.asInt32();
+
+ if (src2.isDouble())
+ return static_cast<JSString*>(cell1)->value().toDouble() == src2.asDouble();
+
+ if (src2.isTrue())
+ return static_cast<JSString*>(cell1)->value().toDouble() == 1.0;
+
+ if (src2.isFalse())
+ return static_cast<JSString*>(cell1)->value().toDouble() == 0.0;
+
+ ASSERT(src2.isCell());
+ JSCell* cell2 = asCell(src2);
+ if (cell2->isString())
+ return static_cast<JSString*>(cell1)->value() == static_cast<JSString*>(cell2)->value();
+
+ ASSERT(cell2->isObject());
+ src2 = static_cast<JSObject*>(cell2)->toPrimitive(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ goto start;
+ }
+
+ ASSERT(cell1->isObject());
+ if (src2.isObject())
+ return static_cast<JSObject*>(cell1) == asObject(src2);
+ src1 = static_cast<JSObject*>(cell1)->toPrimitive(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ goto start;
+}
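This is the ECMA-262 abstract equality algorithm (ES3 section 11.9.3) flattened into a loop: whenever an operand is an object, it is replaced by its toPrimitive() result and control jumps back to start. A worked trace for the classic ([] == false) case:

    // src1 = [] (object), src2 = false
    //   -> not undefined/null/number/boolean; cell1 is an object and src2 is
    //      not, so src1 = toPrimitive([]) == "", then goto start.
    // src1 = "" (string), src2 = false
    //   -> cell1->isString() and src2.isFalse(), so the result is
    //      "".toDouble() == 0.0, i.e. 0 == 0, which is true.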
+
+DEFINE_STUB_FUNCTION(int, op_eq_strings)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSString* string1 = stackFrame.args[0].jsString();
+ JSString* string2 = stackFrame.args[1].jsString();
+
+ ASSERT(string1->isString());
+ ASSERT(string2->isString());
+ return string1->value() == string2->value();
+}
+
+#else // USE(JSVALUE32_64)
+
+DEFINE_STUB_FUNCTION(int, op_eq)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2051,12 +2417,13 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_eq)
CallFrame* callFrame = stackFrame.callFrame;
- ASSERT(!JSValue::areBothInt32Fast(src1, src2));
- JSValue result = jsBoolean(JSValue::equalSlowCaseInline(callFrame, src1, src2));
+ bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
+ return result;
}
+#endif // USE(JSVALUE32_64)
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2064,13 +2431,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
JSValue val = stackFrame.args[0].jsValue();
JSValue shift = stackFrame.args[1].jsValue();
- int32_t left;
- uint32_t right;
- if (JSValue::areBothInt32Fast(val, shift))
- return JSValue::encode(jsNumber(stackFrame.globalData, val.getInt32Fast() << (shift.getInt32Fast() & 0x1f)));
- if (val.numberToInt32(left) && shift.numberToUInt32(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left << (right & 0x1f)));
-
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
CHECK_FOR_EXCEPTION_AT_END();
@@ -2084,11 +2444,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
- int32_t left;
- int32_t right;
- if (src1.numberToInt32(left) && src2.numberToInt32(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left & right));
-
+ ASSERT(!src1.isInt32() || !src2.isInt32());
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
@@ -2102,15 +2458,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
JSValue val = stackFrame.args[0].jsValue();
JSValue shift = stackFrame.args[1].jsValue();
- int32_t left;
- uint32_t right;
- if (JSFastMath::canDoFastRshift(val, shift))
- return JSValue::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
- if (val.numberToInt32(left) && shift.numberToUInt32(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left >> (right & 0x1f)));
-
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2121,10 +2471,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
JSValue src = stackFrame.args[0].jsValue();
- int value;
- if (src.numberToInt32(value))
- return JSValue::encode(jsNumber(stackFrame.globalData, ~value));
-
+ ASSERT(!src.isInt32());
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, ~src.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
@@ -2198,21 +2545,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_less)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_neq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- ASSERT(!JSValue::areBothInt32Fast(src1, src2));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsBoolean(!JSValue::equalSlowCaseInline(callFrame, src1, src2));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2236,14 +2568,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
JSValue shift = stackFrame.args[1].jsValue();
CallFrame* callFrame = stackFrame.callFrame;
-
- if (JSFastMath::canDoFastUrshift(val, shift))
- return JSValue::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
- else {
- JSValue result = jsNumber(stackFrame.globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
- }
+ JSValue result = jsNumber(stackFrame.globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
@@ -2302,7 +2629,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
if (thisValue == globalObject && funcVal == globalObject->evalFunction()) {
JSValue exceptionValue;
JSValue result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset, exceptionValue);
- if (UNLIKELY(exceptionValue != JSValue())) {
+ if (UNLIKELY(exceptionValue)) {
stackFrame.globalData->exception = exceptionValue;
VM_THROW_EXCEPTION_AT_END();
}
@@ -2333,7 +2660,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw)
}
stackFrame.callFrame = callFrame;
- void* catchRoutine = handler->nativeCode.addressForExceptionHandler();
+ void* catchRoutine = handler->nativeCode.executableAddress();
ASSERT(catchRoutine);
STUB_SET_RETURN_ADDRESS(catchRoutine);
return JSValue::encode(exceptionValue);
@@ -2540,15 +2867,15 @@ DEFINE_STUB_FUNCTION(void*, op_switch_imm)
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- if (scrutinee.isInt32Fast())
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.getInt32Fast()).addressForSwitch();
+ if (scrutinee.isInt32())
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.asInt32()).executableAddress();
else {
double value;
int32_t intValue;
if (scrutinee.getNumber(value) && ((intValue = static_cast<int32_t>(value)) == value))
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).addressForSwitch();
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).executableAddress();
else
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch();
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
}
}
@@ -2561,12 +2888,12 @@ DEFINE_STUB_FUNCTION(void*, op_switch_char)
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch();
+ void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
if (scrutinee.isString()) {
UString::Rep* value = asString(scrutinee)->value().rep();
if (value->size() == 1)
- result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).addressForSwitch();
+ result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).executableAddress();
}
return result;
@@ -2581,11 +2908,11 @@ DEFINE_STUB_FUNCTION(void*, op_switch_string)
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch();
+ void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
if (scrutinee.isString()) {
UString::Rep* value = asString(scrutinee)->value().rep();
- result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).addressForSwitch();
+ result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
}
return result;
@@ -2689,14 +3016,12 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
}
stackFrame.callFrame = callFrame;
- void* catchRoutine = handler->nativeCode.addressForExceptionHandler();
+ void* catchRoutine = handler->nativeCode.executableAddress();
ASSERT(catchRoutine);
STUB_SET_RETURN_ADDRESS(catchRoutine);
return JSValue::encode(exceptionValue);
}
-} // namespace JITStubs
-
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index 8e81ade..8f02435 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -38,6 +38,8 @@
namespace JSC {
+ struct StructureStubInfo;
+
class CodeBlock;
class ExecutablePool;
class Identifier;
@@ -53,6 +55,7 @@ namespace JSC {
class RegisterFile;
class FuncDeclNode;
class FuncExprNode;
+ class JSGlobalObject;
class RegExp;
union JITStubArg {
@@ -68,13 +71,23 @@ namespace JSC {
FuncExprNode* funcExprNode() { return static_cast<FuncExprNode*>(asPointer); }
RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
- void* returnAddress() { return asPointer; }
+ JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
+ JSString* jsString() { return static_cast<JSString*>(asPointer); }
+ ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
};
#if PLATFORM(X86_64)
struct JITStackFrame {
- JITStubArg padding; // Unused
- JITStubArg args[8];
+ void* reserved; // Unused
+ JITStubArg args[6];
+ void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
void* savedRBX;
void* savedR15;
@@ -84,20 +97,20 @@ namespace JSC {
void* savedRBP;
void* savedRIP;
- void* code;
- RegisterFile* registerFile;
- CallFrame* callFrame;
- JSValue* exception;
- Profiler** enabledProfilerReference;
- JSGlobalData* globalData;
-
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
- void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
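The "- 1" in returnAddressSlot() relies on the stack growing downward: the call into the stub writes its return address to the word immediately below the JITStackFrame, so stepping one slot back from this lands on it. The equivalent byte arithmetic (a sketch; frame is a hypothetical JITStackFrame*):

    // Same computation as returnAddressSlot(), spelled out in bytes.
    ReturnAddressPtr* slot = reinterpret_cast<ReturnAddressPtr*>(
        reinterpret_cast<char*>(frame) - sizeof(ReturnAddressPtr));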
#elif PLATFORM(X86)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
struct JITStackFrame {
- JITStubArg padding; // Unused
+ void* reserved; // Unused
JITStubArg args[6];
+#if USE(JSVALUE32_64)
+ void* padding[2]; // Maintain 16-byte stack alignment.
+#endif
void* savedEBX;
void* savedEDI;
@@ -113,14 +126,20 @@ namespace JSC {
JSGlobalData* globalData;
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
- void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
-#elif PLATFORM(ARM_V7)
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#elif PLATFORM_ARM_ARCH(7)
struct JITStackFrame {
- JITStubArg padding; // Unused
+ void* reserved; // Unused
JITStubArg args[6];
+#if USE(JSVALUE32_64)
+ void* padding[2]; // Maintain 16-byte stack alignment.
+#endif
- void* thunkReturnAddress;
+ ReturnAddressPtr thunkReturnAddress;
void* preservedReturnAddress;
void* preservedR4;
@@ -136,7 +155,28 @@ namespace JSC {
Profiler** enabledProfilerReference;
JSGlobalData* globalData;
- void** returnAddressSlot() { return &thunkReturnAddress; }
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#elif PLATFORM(ARM)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[7];
+
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+ void* preservedR7;
+ void* preservedR8;
+ void* preservedLink;
+
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
#else
#error "JITStackFrame not defined for this platform."
@@ -183,24 +223,16 @@ namespace JSC {
extern "C" void ctiVMThrowTrampoline();
extern "C" void ctiOpThrowNotCaught();
- extern "C" EncodedJSValue ctiTrampoline(
-#if PLATFORM(X86_64)
- // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect).
- // We can allow register passing here, and move the writes of these values into the trampoline.
- void*, void*, void*, void*, void*, void*,
-#endif
- void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*);
+ extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*);
class JITThunks {
public:
JITThunks(JSGlobalData*);
- static void tryCacheGetByID(CallFrame*, CodeBlock*, void* returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&);
- static void tryCachePutByID(CallFrame*, CodeBlock*, void* returnAddress, JSValue baseValue, const PutPropertySlot&);
-
- MacroAssemblerCodePtr ctiArrayLengthTrampoline() { return m_ctiArrayLengthTrampoline; }
+ static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
+ static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo);
+
MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_ctiStringLengthTrampoline; }
- MacroAssemblerCodePtr ctiVirtualCallPreLink() { return m_ctiVirtualCallPreLink; }
MacroAssemblerCodePtr ctiVirtualCallLink() { return m_ctiVirtualCallLink; }
MacroAssemblerCodePtr ctiVirtualCall() { return m_ctiVirtualCall; }
MacroAssemblerCodePtr ctiNativeCallThunk() { return m_ctiNativeCallThunk; }
@@ -208,64 +240,13 @@ namespace JSC {
private:
RefPtr<ExecutablePool> m_executablePool;
- MacroAssemblerCodePtr m_ctiArrayLengthTrampoline;
MacroAssemblerCodePtr m_ctiStringLengthTrampoline;
- MacroAssemblerCodePtr m_ctiVirtualCallPreLink;
MacroAssemblerCodePtr m_ctiVirtualCallLink;
MacroAssemblerCodePtr m_ctiVirtualCall;
MacroAssemblerCodePtr m_ctiNativeCallThunk;
};
-namespace JITStubs { extern "C" {
-
- void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_second(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_less(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_true(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_vm_dontLazyLinkCall(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
- JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
+extern "C" {
EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_bitnot(STUB_ARGS_DECLARATION);
@@ -274,25 +255,22 @@ namespace JITStubs { extern "C" {
EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_method_check_second(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_second(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION);
@@ -307,16 +285,18 @@ namespace JITStubs { extern "C" {
EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_neq(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_next_pname(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_pre_dec(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION);
@@ -327,13 +307,58 @@ namespace JITStubs { extern "C" {
EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_func(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
+ JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
-
-}; } // extern "C" namespace JITStubs
+ int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
+#if USE(JSVALUE32_64)
+ int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION);
+#endif
+ int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_less(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_true(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
+} // extern "C"
} // namespace JSC
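
The hunks above consolidate every stub prototype into a single extern "C" block inside namespace JSC, grouped by return type, replacing the old nested JITStubs namespace. Below is a minimal, self-contained sketch of the resulting pattern; the macro expansions and the EncodedJSValue typedef are illustrative stand-ins only, as the real JIT_STUB and STUB_ARGS_DECLARATION macros are platform-dependent and defined earlier in JITStubs.h, and the real value encoding lives in JSValue.h.

    // Illustrative stand-ins for the platform-dependent definitions:
    #include <cstdio>
    #include <cstdint>

    #define JIT_STUB                          // calling-convention attribute, if any
    #define STUB_ARGS_DECLARATION void** args

    typedef int64_t EncodedJSValue;           // stand-in for the real encoding

    namespace JSC {

    extern "C" {
        // One prototype per opcode trampoline, grouped by return type.
        EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);

    #if defined(USE_JSVALUE32_64)             // stand-in for USE(JSVALUE32_64)
        // Some stubs exist only under one value representation, e.g. the
        // string-equality fast path added for the 32/64 split encoding.
        int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION);
    #endif
    } // extern "C"

    // Dummy definition showing only the shape; a real stub decodes its
    // operands from the argument frame set up by the JIT'd caller.
    extern "C" EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION)
    {
        (void)args;                           // unused in this sketch
        return 0;
    }

    } // namespace JSC

    int main()
    {
        void* args[1] = { 0 };
        std::printf("%lld\n", (long long)JSC::cti_op_add(args));
        return 0;
    }

C linkage gives each trampoline a stable, unmangled symbol name, which matters because JIT-generated machine code and the assembly glue in JITStubs.cpp reference these symbols directly; a single extern "C" block also makes it harder for a new prototype to be added outside the linkage specification by mistake.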