Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h         |  41
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorPosix.cpp  |   5
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorWin.cpp    |   5
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                       |  13
-rw-r--r--  JavaScriptCore/jit/JIT.h                         | 225
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp             | 319
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp                   | 137
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h            | 135
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp                | 328
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp         | 151
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp                  | 159
-rw-r--r--  JavaScriptCore/jit/JITStubs.h                    |  13
12 files changed, 891 insertions(+), 640 deletions(-)
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index 4ed47e3..3274fcc 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -38,6 +38,10 @@
#include <sys/mman.h>
#endif
+#if PLATFORM(SYMBIAN)
+#include <e32std.h>
+#endif
+
#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
@@ -176,30 +180,35 @@ public:
static void cacheFlush(void*, size_t)
{
}
-#elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
+#elif PLATFORM(ARM_THUMB2) && PLATFORM(IPHONE)
static void cacheFlush(void* code, size_t size)
{
sys_dcache_flush(code, size);
sys_icache_invalidate(code, size);
}
-#elif PLATFORM(ARM)
+#elif PLATFORM(SYMBIAN)
+ static void cacheFlush(void* code, size_t size)
+ {
+ User::IMB_Range(code, static_cast<char*>(code) + size);
+ }
+#elif PLATFORM(ARM_TRADITIONAL) && PLATFORM(LINUX)
static void cacheFlush(void* code, size_t size)
{
- #if COMPILER(GCC) && (GCC_VERSION >= 30406)
- __clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(code) + size);
- #else
- const int syscall = 0xf0002;
- __asm __volatile (
- "mov r0, %0\n"
- "mov r1, %1\n"
- "mov r7, %2\n"
- "mov r2, #0x0\n"
- "swi 0x00000000\n"
- :
- : "r" (code), "r" (reinterpret_cast<char*>(code) + size), "r" (syscall)
- : "r0", "r1", "r7");
- #endif // COMPILER(GCC) && (GCC_VERSION >= 30406)
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
+ : "r0", "r1");
}
+#else
+ #error "The cacheFlush support is missing on this platform."
#endif
private:
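
The new ARM_TRADITIONAL/Linux branch above invokes the kernel's cacheflush service directly, building the syscall number 0xf0002 in r7 (and saving r7 around the call, since the EABI reserves it for exactly this purpose). A minimal sketch of the same flush via the compiler builtin, assuming GCC or Clang targeting ARM Linux:

// Sketch only: __builtin___clear_cache lowers to the same kernel cacheflush
// call as the inline asm above, keeping the D-cache and I-cache coherent for
// freshly written JIT code.
#include <cstddef>

static void flushJITCode(void* code, std::size_t size)
{
    char* begin = static_cast<char*>(code);
    __builtin___clear_cache(begin, begin + size);
}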
diff --git a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
index 4bd5a2c..13a8626 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -44,7 +44,10 @@ void ExecutableAllocator::intializePageSize()
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
{
- ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0)), n };
+ void* allocation = mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
+ if (allocation == MAP_FAILED)
+ CRASH();
+ ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
return alloc;
}
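
This hunk makes allocation failure fatal: previously a failed mmap returned MAP_FAILED (-1), which was silently cast into the pool as if it were a valid base address. A self-contained sketch of the checked pattern, with abort() standing in for WebKit's CRASH() and plain RWX flags in place of the tree's INITIAL_PROTECTION_FLAGS and VM tag:

#include <sys/mman.h>
#include <cstdlib>

static void* systemAllocSketch(std::size_t n)
{
    void* p = mmap(0, n, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p == MAP_FAILED)  // mmap reports failure as MAP_FAILED, never as null
        std::abort();     // stand-in for CRASH()
    return p;
}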
diff --git a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
index a9ba7d0..e6ac855 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
@@ -42,7 +42,10 @@ void ExecutableAllocator::intializePageSize()
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
{
- ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE)), n};
+ void* allocation = VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+ if (!allocation)
+ CRASH();
+ ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocation), n};
return alloc;
}
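
The Windows allocator gets the matching fix; the detail worth noting is the failure sentinel, since VirtualAlloc returns a null pointer where mmap returns MAP_FAILED. A sketch under the same assumptions:

#include <windows.h>
#include <cstdlib>

static void* systemAllocSketch(std::size_t n)
{
    void* p = VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
    if (!p)            // VirtualAlloc reports failure as NULL
        std::abort();  // stand-in for CRASH()
    return p;
}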
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index 0d6d1b8..ea8434e 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -195,7 +195,7 @@ void JIT::privateCompileMainPass()
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_BINARY_OP(op_del_by_val)
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE32)
DEFINE_BINARY_OP(op_div)
#endif
DEFINE_BINARY_OP(op_in)
@@ -230,7 +230,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_create_arguments)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
-#if USE(JSVALUE32_64)
+#if !USE(JSVALUE32)
DEFINE_OP(op_div)
#endif
DEFINE_OP(op_end)
@@ -379,7 +379,7 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_construct)
DEFINE_SLOWCASE_OP(op_construct_verify)
DEFINE_SLOWCASE_OP(op_convert_this)
-#if USE(JSVALUE32_64)
+#if !USE(JSVALUE32)
DEFINE_SLOWCASE_OP(op_div)
#endif
DEFINE_SLOWCASE_OP(op_eq)
@@ -438,7 +438,7 @@ void JIT::privateCompileSlowCases()
#endif
}
-void JIT::privateCompile()
+JITCode JIT::privateCompile()
{
sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
@@ -552,7 +552,7 @@ void JIT::privateCompile()
info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
}
- m_codeBlock->setJITCode(patchBuffer.finalizeCode());
+ return patchBuffer.finalizeCode();
}
#if !USE(JSVALUE32_64)
@@ -587,12 +587,11 @@ void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
- ASSERT(calleeCodeBlock);
RepatchBuffer repatchBuffer(callerCodeBlock);
// Currently we only link calls with the exact number of arguments.
// If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
- if (callerArgCount == calleeCodeBlock->m_numParameters || calleeCodeBlock->codeType() == NativeCode) {
+ if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
ASSERT(!callLinkInfo->isLinked());
if (calleeCodeBlock)
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index 93f47d9..0712743 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -191,82 +191,82 @@ namespace JSC {
// MacroAssembler will need to plant register swaps if it is not -
// however the code will still function correctly.
#if PLATFORM(X86_64)
- static const RegisterID returnValueRegister = X86::eax;
- static const RegisterID cachedResultRegister = X86::eax;
- static const RegisterID firstArgumentRegister = X86::edi;
-
- static const RegisterID timeoutCheckRegister = X86::r12;
- static const RegisterID callFrameRegister = X86::r13;
- static const RegisterID tagTypeNumberRegister = X86::r14;
- static const RegisterID tagMaskRegister = X86::r15;
-
- static const RegisterID regT0 = X86::eax;
- static const RegisterID regT1 = X86::edx;
- static const RegisterID regT2 = X86::ecx;
- static const RegisterID regT3 = X86::ebx;
-
- static const FPRegisterID fpRegT0 = X86::xmm0;
- static const FPRegisterID fpRegT1 = X86::xmm1;
- static const FPRegisterID fpRegT2 = X86::xmm2;
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+ static const RegisterID firstArgumentRegister = X86Registers::edi;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::r12;
+ static const RegisterID callFrameRegister = X86Registers::r13;
+ static const RegisterID tagTypeNumberRegister = X86Registers::r14;
+ static const RegisterID tagMaskRegister = X86Registers::r15;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
#elif PLATFORM(X86)
- static const RegisterID returnValueRegister = X86::eax;
- static const RegisterID cachedResultRegister = X86::eax;
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
// On x86 we always use fastcall conventions, but on
// OS X it might make more sense to just use regparm.
- static const RegisterID firstArgumentRegister = X86::ecx;
-
- static const RegisterID timeoutCheckRegister = X86::esi;
- static const RegisterID callFrameRegister = X86::edi;
-
- static const RegisterID regT0 = X86::eax;
- static const RegisterID regT1 = X86::edx;
- static const RegisterID regT2 = X86::ecx;
- static const RegisterID regT3 = X86::ebx;
-
- static const FPRegisterID fpRegT0 = X86::xmm0;
- static const FPRegisterID fpRegT1 = X86::xmm1;
- static const FPRegisterID fpRegT2 = X86::xmm2;
-#elif PLATFORM_ARM_ARCH(7)
- static const RegisterID returnValueRegister = ARM::r0;
- static const RegisterID cachedResultRegister = ARM::r0;
- static const RegisterID firstArgumentRegister = ARM::r0;
-
- static const RegisterID regT0 = ARM::r0;
- static const RegisterID regT1 = ARM::r1;
- static const RegisterID regT2 = ARM::r2;
- static const RegisterID regT3 = ARM::r4;
-
- static const RegisterID callFrameRegister = ARM::r5;
- static const RegisterID timeoutCheckRegister = ARM::r6;
-
- static const FPRegisterID fpRegT0 = ARM::d0;
- static const FPRegisterID fpRegT1 = ARM::d1;
- static const FPRegisterID fpRegT2 = ARM::d2;
-#elif PLATFORM(ARM)
- static const RegisterID returnValueRegister = ARM::r0;
- static const RegisterID cachedResultRegister = ARM::r0;
- static const RegisterID firstArgumentRegister = ARM::r0;
-
- static const RegisterID timeoutCheckRegister = ARM::r5;
- static const RegisterID callFrameRegister = ARM::r4;
- static const RegisterID ctiReturnRegister = ARM::r6;
-
- static const RegisterID regT0 = ARM::r0;
- static const RegisterID regT1 = ARM::r1;
- static const RegisterID regT2 = ARM::r2;
+ static const RegisterID firstArgumentRegister = X86Registers::ecx;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::esi;
+ static const RegisterID callFrameRegister = X86Registers::edi;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+#elif PLATFORM(ARM_THUMB2)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ static const RegisterID regT3 = ARMRegisters::r4;
+
+ static const RegisterID callFrameRegister = ARMRegisters::r5;
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+#elif PLATFORM(ARM_TRADITIONAL)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
+ static const RegisterID callFrameRegister = ARMRegisters::r4;
+ static const RegisterID ctiReturnRegister = ARMRegisters::r6;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
// Callee preserved
- static const RegisterID regT3 = ARM::r7;
+ static const RegisterID regT3 = ARMRegisters::r7;
- static const RegisterID regS0 = ARM::S0;
+ static const RegisterID regS0 = ARMRegisters::S0;
// Callee preserved
- static const RegisterID regS1 = ARM::S1;
+ static const RegisterID regS1 = ARMRegisters::S1;
- static const RegisterID regStackPtr = ARM::sp;
- static const RegisterID regLink = ARM::lr;
+ static const RegisterID regStackPtr = ARMRegisters::sp;
+ static const RegisterID regLink = ARMRegisters::lr;
- static const FPRegisterID fpRegT0 = ARM::d0;
- static const FPRegisterID fpRegT1 = ARM::d1;
- static const FPRegisterID fpRegT2 = ARM::d2;
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
#else
#error "JIT not supported on this platform."
#endif
@@ -277,10 +277,9 @@ namespace JSC {
static const int patchGetByIdDefaultOffset = 256;
public:
- static void compile(JSGlobalData* globalData, CodeBlock* codeBlock)
+ static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock)
{
- JIT jit(globalData, codeBlock);
- jit.privateCompile();
+ return JIT(globalData, codeBlock).privateCompile();
}
static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress)
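
Since privateCompile() now returns the finalized JITCode instead of storing it into the CodeBlock as a side effect (the setJITCode call removed in JIT.cpp above), the caller owns the wiring. A sketch of the resulting call-site pattern, assuming a codeBlock and globalData in scope:

// Before: JIT::compile(globalData, codeBlock) stored the code internally.
// After:  the caller receives the JITCode and attaches it itself.
codeBlock->setJITCode(JIT::compile(globalData, codeBlock));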
@@ -353,7 +352,7 @@ namespace JSC {
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
- void privateCompile();
+ JITCode privateCompile();
void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
@@ -380,14 +379,18 @@ namespace JSC {
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
+ bool isOperandConstantImmediateDouble(unsigned src);
+
+ void emitLoadDouble(unsigned index, FPRegisterID value);
+ void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
+
+ Address addressFor(unsigned index, RegisterID base = callFrameRegister);
#if USE(JSVALUE32_64)
Address tagFor(unsigned index, RegisterID base = callFrameRegister);
Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
- Address addressFor(unsigned index, RegisterID base = callFrameRegister);
bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
- bool isOperandConstantImmediateDouble(unsigned src);
void emitLoadTag(unsigned index, RegisterID tag);
void emitLoadPayload(unsigned index, RegisterID payload);
@@ -395,8 +398,6 @@ namespace JSC {
void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
void emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
void emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2);
- void emitLoadDouble(unsigned index, FPRegisterID value);
- void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
@@ -500,6 +501,7 @@ namespace JSC {
JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
#if !USE(JSVALUE64)
@@ -512,7 +514,11 @@ namespace JSC {
void emitTagAsBoolImmediate(RegisterID reg);
void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
- void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+#if USE(JSVALUE64)
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
+#else
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes);
+#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
@@ -536,7 +542,7 @@ namespace JSC {
static const int patchOffsetGetByIdPropertyMapOffset = 31;
static const int patchOffsetGetByIdPutResult = 31;
#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 63;
+ static const int patchOffsetGetByIdSlowCaseCall = 64;
#else
static const int patchOffsetGetByIdSlowCaseCall = 41;
#endif
@@ -572,7 +578,7 @@ namespace JSC {
static const int patchOffsetMethodCheckProtoObj = 11;
static const int patchOffsetMethodCheckProtoStruct = 18;
static const int patchOffsetMethodCheckPutFunction = 29;
-#elif PLATFORM_ARM_ARCH(7)
+#elif PLATFORM(ARM_THUMB2)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 10;
static const int patchOffsetPutByIdExternalLoad = 20;
@@ -595,9 +601,62 @@ namespace JSC {
static const int patchOffsetMethodCheckProtoObj = 18;
static const int patchOffsetMethodCheckProtoStruct = 28;
static const int patchOffsetMethodCheckPutFunction = 46;
+#elif PLATFORM(ARM_TRADITIONAL)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 4;
+ static const int patchOffsetPutByIdExternalLoad = 16;
+ static const int patchLengthPutByIdExternalLoad = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset = 20;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 4;
+ static const int patchOffsetGetByIdBranchToSlowCase = 16;
+ static const int patchOffsetGetByIdExternalLoad = 16;
+ static const int patchLengthGetByIdExternalLoad = 4;
+ static const int patchOffsetGetByIdPropertyMapOffset = 20;
+ static const int patchOffsetGetByIdPutResult = 28;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 36;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 12;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
#endif
#endif // USE(JSVALUE32_64)
+#if PLATFORM(ARM_TRADITIONAL)
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 28;
+ static const int sequenceGetByIdHotPathConstantSpace = 3;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 28;
+ static const int sequencePutByIdConstantSpace = 3;
+#endif
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
+#define END_UNINTERRUPTED_SEQUENCE(name) endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
+
+ void beginUninterruptedSequence(int, int);
+ void endUninterruptedSequence(int, int);
+
+#else
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
+#define END_UNINTERRUPTED_SEQUENCE(name)
+#endif
+
void emit_op_add(Instruction*);
void emit_op_bitand(Instruction*);
void emit_op_bitnot(Instruction*);
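
The per-sequence constants feed the BEGIN/END_UNINTERRUPTED_SEQUENCE macros defined just above: on assemblers with a constant pool (ARM traditional here), the pool may be dumped between any two emitted instructions, which would invalidate the fixed patch offsets listed earlier, so space is reserved up front and debug builds assert the sequence used exactly what it declared. A hedged expansion for the call sequence:

// BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall) token-pastes the name into:
beginUninterruptedSequence(sequenceOpCallInstructionSpace,  // 12 bytes of instructions
                           sequenceOpCallConstantSpace);    // 2 constant-pool entries
// ... the patchable compare-and-jump is emitted here ...
endUninterruptedSequence(sequenceOpCallInstructionSpace,    // debug builds assert that
                         sequenceOpCallConstantSpace);      // exactly this much was used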
@@ -741,6 +800,7 @@ namespace JSC {
/* These functions are deprecated: Please use JITStubCall instead. */
void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
#if USE(JSVALUE32_64)
+ void emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber);
void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2);
#else
void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
@@ -835,6 +895,13 @@ namespace JSC {
int m_lastResultBytecodeRegister;
unsigned m_jumpTargetsPosition;
#endif
+
+#ifndef NDEBUG
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ Label m_uninterruptedInstructionSequenceBegin;
+ int m_uninterruptedConstantSequenceBegin;
+#endif
+#endif
} JIT_CLASS_ALIGNMENT;
} // namespace JSC
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index ea343d8..7afc1f2 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -566,6 +566,14 @@ void JIT::emit_op_add(Instruction* currentInstruction)
unsigned op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+ return;
+ }
+
JumpList notInt32Op1;
JumpList notInt32Op2;
@@ -630,19 +638,21 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
unsigned op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
+ return;
+
unsigned op;
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
linkSlowCase(iter); // overflow check
- if (!supportsFloatingPoint()) {
+ if (!supportsFloatingPoint())
linkSlowCase(iter); // non-sse case
- return;
+ else {
+ ResultType opType = op == op1 ? types.first() : types.second();
+ if (!opType.definitelyIsNumber())
+ linkSlowCase(iter); // double check
}
-
- ResultType opType = op == op1 ? types.first() : types.second();
- if (!opType.definitelyIsNumber())
- linkSlowCase(iter); // double check
} else {
linkSlowCase(iter); // overflow check
@@ -1053,33 +1063,33 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
unsigned op2 = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
- emitLoad(op1, X86::edx, X86::eax);
- move(Imm32(getConstantOperand(op2).asInt32()), X86::ecx);
- addSlowCase(branch32(NotEqual, X86::edx, Imm32(JSValue::Int32Tag)));
+ emitLoad(op1, X86Registers::edx, X86Registers::eax);
+ move(Imm32(getConstantOperand(op2).asInt32()), X86Registers::ecx);
+ addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
if (getConstantOperand(op2).asInt32() == -1)
- addSlowCase(branch32(Equal, X86::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
} else {
- emitLoad2(op1, X86::edx, X86::eax, op2, X86::ebx, X86::ecx);
- addSlowCase(branch32(NotEqual, X86::edx, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, X86::ebx, Imm32(JSValue::Int32Tag)));
+ emitLoad2(op1, X86Registers::edx, X86Registers::eax, op2, X86Registers::ebx, X86Registers::ecx);
+ addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, X86Registers::ebx, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(Equal, X86::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
- addSlowCase(branch32(Equal, X86::ecx, Imm32(0))); // divide by 0
+ addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, X86Registers::ecx, Imm32(0))); // divide by 0
}
- move(X86::eax, X86::ebx); // Save dividend payload, in case of 0.
+ move(X86Registers::eax, X86Registers::ebx); // Save dividend payload, in case of 0.
m_assembler.cdq();
- m_assembler.idivl_r(X86::ecx);
+ m_assembler.idivl_r(X86Registers::ecx);
// If the remainder is zero and the dividend is negative, the result is -0.
- Jump storeResult1 = branchTest32(NonZero, X86::edx);
- Jump storeResult2 = branchTest32(Zero, X86::ebx, Imm32(0x80000000)); // not negative
+ Jump storeResult1 = branchTest32(NonZero, X86Registers::edx);
+ Jump storeResult2 = branchTest32(Zero, X86Registers::ebx, Imm32(0x80000000)); // not negative
emitStore(dst, jsNumber(m_globalData, -0.0));
Jump end = jump();
storeResult1.link(this);
storeResult2.link(this);
- emitStoreInt32(dst, X86::edx, (op1 == dst || op2 == dst));
+ emitStoreInt32(dst, X86Registers::edx, (op1 == dst || op2 == dst));
end.link(this);
}
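
The two storeResult branches above implement JavaScript's negative-zero rule for %: a zero remainder with a negative dividend must produce -0, which no int32 can represent, so that one case is stored as a double constant. A plain C++ restatement of the semantics the fast path preserves (sketch; assumes the divisor checks already emitted above):

#include <cstdint>

static double jsModSketch(int32_t dividend, int32_t divisor)
{
    // The JIT's slow cases already excluded divisor == 0 and INT32_MIN / -1.
    int32_t remainder = dividend % divisor;
    if (!remainder && dividend < 0)
        return -0.0;   // e.g. (-4) % 2: remainder 0, negative dividend => -0
    return remainder;  // otherwise an ordinary int32 result
}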
@@ -1847,21 +1857,21 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
+ emitGetVirtualRegisters(op1, X86Registers::eax, op2, X86Registers::ecx);
+ emitJumpSlowCaseIfNotImmediateInteger(X86Registers::eax);
+ emitJumpSlowCaseIfNotImmediateInteger(X86Registers::ecx);
#if USE(JSVALUE64)
- addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
+ addSlowCase(branchPtr(Equal, X86Registers::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
m_assembler.cdq();
- m_assembler.idivl_r(X86::ecx);
+ m_assembler.idivl_r(X86Registers::ecx);
#else
- emitFastArithDeTagImmediate(X86::eax);
- addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
+ emitFastArithDeTagImmediate(X86Registers::eax);
+ addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86Registers::ecx));
m_assembler.cdq();
- m_assembler.idivl_r(X86::ecx);
- signExtend32ToPtr(X86::edx, X86::edx);
+ m_assembler.idivl_r(X86Registers::ecx);
+ signExtend32ToPtr(X86Registers::edx, X86Registers::edx);
#endif
- emitFastArithReTagImmediate(X86::edx, X86::eax);
+ emitFastArithReTagImmediate(X86Registers::edx, X86Registers::eax);
emitPutVirtualRegister(result);
}
@@ -1877,14 +1887,14 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
Jump notImm1 = getSlowCase(iter);
Jump notImm2 = getSlowCase(iter);
linkSlowCase(iter);
- emitFastArithReTagImmediate(X86::eax, X86::eax);
- emitFastArithReTagImmediate(X86::ecx, X86::ecx);
+ emitFastArithReTagImmediate(X86Registers::eax, X86Registers::eax);
+ emitFastArithReTagImmediate(X86Registers::ecx, X86Registers::ecx);
notImm1.link(this);
notImm2.link(this);
#endif
JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(X86::eax);
- stubCall.addArgument(X86::ecx);
+ stubCall.addArgument(X86Registers::eax);
+ stubCall.addArgument(X86Registers::ecx);
stubCall.call(result);
}
@@ -1932,55 +1942,87 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsign
emitFastArithIntToImmNoCheck(regT0, regT0);
}
-void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned, OperandTypes types)
+void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
// We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
-
- Jump notImm1 = getSlowCase(iter);
- Jump notImm2 = getSlowCase(iter);
+
+ Jump notImm1;
+ Jump notImm2;
+ if (op1HasImmediateIntFastCase) {
+ notImm2 = getSlowCase(iter);
+ } else if (op2HasImmediateIntFastCase) {
+ notImm1 = getSlowCase(iter);
+ } else {
+ notImm1 = getSlowCase(iter);
+ notImm2 = getSlowCase(iter);
+ }
linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
- if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
+ if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
linkSlowCase(iter);
emitGetVirtualRegister(op1, regT0);
Label stubFunctionCall(this);
JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
+ if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
+ emitGetVirtualRegister(op1, regT0);
+ emitGetVirtualRegister(op2, regT1);
+ }
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(result);
Jump end = jump();
- // if we get here, eax is not an int32, edx not yet checked.
- notImm1.link(this);
- if (!types.first().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT1);
- Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
- convertInt32ToDouble(regT1, fpRegT2);
- Jump op2wasInteger = jump();
-
- // if we get here, eax IS an int32, edx is not.
- notImm2.link(this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- convertInt32ToDouble(regT0, fpRegT1);
- op2isDouble.link(this);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT2);
- op2wasInteger.link(this);
+ if (op1HasImmediateIntFastCase) {
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ emitGetVirtualRegister(op1, regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT2);
+ } else if (op2HasImmediateIntFastCase) {
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ emitGetVirtualRegister(op2, regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT2);
+ } else {
+ // if we get here, eax is not an int32, edx not yet checked.
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT1);
+ Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT2);
+ Jump op2wasInteger = jump();
+
+ // if we get here, eax IS an int32, edx is not.
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ convertInt32ToDouble(regT0, fpRegT1);
+ op2isDouble.link(this);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT2);
+ op2wasInteger.link(this);
+ }
if (opcodeID == op_add)
addDouble(fpRegT2, fpRegT1);
else if (opcodeID == op_sub)
subDouble(fpRegT2, fpRegT1);
- else {
- ASSERT(opcodeID == op_mul);
+ else if (opcodeID == op_mul)
mulDouble(fpRegT2, fpRegT1);
+ else {
+ ASSERT(opcodeID == op_div);
+ divDouble(fpRegT2, fpRegT1);
}
moveDoubleToPtr(fpRegT1, regT0);
subPtr(tagTypeNumberRegister, regT0);
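
All of the addPtr/subPtr traffic with tagTypeNumberRegister above leans on the COMPILE_ASSERT at the top of this function: TagTypeNumber + DoubleEncodeOffset wraps to zero, so a single register add both boxes and unboxes a double. A self-contained sketch of that encoding; the constant values are assumptions taken from this tree's JSImmediate scheme:

#include <cstdint>
#include <cstring>

static const uint64_t TagTypeNumber      = 0xffff000000000000ull;
static const uint64_t DoubleEncodeOffset = 0x0001000000000000ull; // -TagTypeNumber mod 2^64

static uint64_t boxDouble(double d)        // moveDoubleToPtr + subPtr(tagTypeNumberRegister)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return bits + DoubleEncodeOffset;      // adding the offset == subtracting the tag
}

static double unboxDouble(uint64_t boxed)  // addPtr(tagTypeNumberRegister)
{
    uint64_t bits = boxed + TagTypeNumber; // adding the tag == subtracting the offset
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
}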
@@ -2025,16 +2067,14 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- } else
- compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
+ return;
+
+ bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
+ bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
+ compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}
void JIT::emit_op_mul(Instruction* currentInstruction)
@@ -2069,17 +2109,106 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
unsigned op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
- || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- JITStubCall stubCall(this, cti_op_mul);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- } else
- compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
+ bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
+ bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
+ compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+}
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateDouble(op1)) {
+ emitGetVirtualRegister(op1, regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitLoadInt32ToDouble(op1, fpRegT0);
+ } else {
+ emitGetVirtualRegister(op1, regT0);
+ if (!types.first().definitelyIsNumber())
+ emitJumpSlowCaseIfNotImmediateNumber(regT0);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+ Jump skipDoubleLoad = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ skipDoubleLoad.link(this);
+ }
+
+ if (isOperandConstantImmediateDouble(op2)) {
+ emitGetVirtualRegister(op2, regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoadInt32ToDouble(op2, fpRegT1);
+ } else {
+ emitGetVirtualRegister(op2, regT1);
+ if (!types.second().definitelyIsNumber())
+ emitJumpSlowCaseIfNotImmediateNumber(regT1);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ Jump skipDoubleLoad = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+ skipDoubleLoad.link(this);
+ }
+ divDouble(fpRegT1, fpRegT0);
+
+ JumpList doubleResult;
+ Jump end;
+ bool attemptIntConversion = (!isOperandConstantImmediateInt(op1) || getConstantOperand(op1).asInt32() > 1) && isOperandConstantImmediateInt(op2);
+ if (attemptIntConversion) {
+ m_assembler.cvttsd2si_rr(fpRegT0, regT0);
+ doubleResult.append(branchTest32(Zero, regT0));
+ m_assembler.ucomisd_rr(fpRegT1, fpRegT0);
+
+ doubleResult.append(m_assembler.jne());
+ doubleResult.append(m_assembler.jp());
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ end = jump();
+ }
+
+ // Double result.
+ doubleResult.link(this);
+ moveDoubleToPtr(fpRegT0, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+
+ if (attemptIntConversion)
+ end.link(this);
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
+#ifndef NDEBUG
+ breakpoint();
+#endif
+ return;
+ }
+ if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter);
+ }
+ if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
+ if (!types.second().definitelyIsNumber())
+ linkSlowCase(iter);
+ }
+ // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
}
void JIT::emit_op_sub(Instruction* currentInstruction)
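
emit_op_div above only keeps an integer result when a truncate-and-compare round trip proves the quotient is exact: branchTest32(Zero, ...) rejects zero so that -0 survives as a double, and the jne/jp pair after ucomisd rejects inexact quotients and NaN. A scalar restatement of the check (sketch):

#include <cstdint>

static bool quotientIsInt32(double q, int32_t& out)
{
    int32_t truncated = static_cast<int32_t>(q); // cvttsd2si: truncate toward zero
    if (truncated == 0)
        return false;                   // could be -0.0: keep the double path
    if (static_cast<double>(truncated) != q)
        return false;                   // inexact or NaN (the ucomisd + jne/jp case)
    out = truncated;
    return true;
}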
@@ -2090,7 +2219,6 @@ void JIT::emit_op_sub(Instruction* currentInstruction)
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
compileBinaryArithOp(op_sub, result, op1, op2, types);
-
emitPutVirtualRegister(result);
}
@@ -2101,7 +2229,7 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
unsigned op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
+ compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
}
#else // USE(JSVALUE64)
@@ -2284,6 +2412,15 @@ void JIT::emit_op_add(Instruction* currentInstruction)
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ return;
+ }
if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT0);
@@ -2298,15 +2435,7 @@ void JIT::emit_op_add(Instruction* currentInstruction)
signExtend32ToPtr(regT0, regT0);
emitPutVirtualRegister(result);
} else {
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (types.first().mightBeNumber() && types.second().mightBeNumber())
- compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
- else {
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- }
+ compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
}
@@ -2316,6 +2445,10 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
+ return;
+
if (isOperandConstantImmediateInt(op1)) {
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
index 7fdb845..cfaa69f 100644
--- a/JavaScriptCore/jit/JITCall.cpp
+++ b/JavaScriptCore/jit/JITCall.cpp
@@ -64,10 +64,9 @@ void JIT::compileOpCallSetupArgs(Instruction* instruction)
int argCount = instruction[3].u.operand;
int registerOffset = instruction[4].u.operand;
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitPutJITStubArgConstant(registerOffset, 3);
- emitPutJITStubArgConstant(argCount, 5);
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArgConstant(registerOffset, 1);
+ emitPutJITStubArgConstant(argCount, 2);
}
void JIT::compileOpConstructSetupArgs(Instruction* instruction)
@@ -77,20 +76,18 @@ void JIT::compileOpConstructSetupArgs(Instruction* instruction)
int proto = instruction[5].u.operand;
int thisRegister = instruction[6].u.operand;
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitPutJITStubArgConstant(registerOffset, 3);
- emitPutJITStubArgConstant(argCount, 5);
- emitPutJITStubArgFromVirtualRegister(proto, 7, regT2, regT3);
- emitPutJITStubArgConstant(thisRegister, 9);
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArgConstant(registerOffset, 1);
+ emitPutJITStubArgConstant(argCount, 2);
+ emitPutJITStubArgFromVirtualRegister(proto, 3, regT2, regT3);
+ emitPutJITStubArgConstant(thisRegister, 4);
}
void JIT::compileOpCallVarargsSetupArgs(Instruction*)
{
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitPutJITStubArg(regT3, 3); // registerOffset
- emitPutJITStubArg(regT2, 5); // argCount
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArg(regT3, 1); // registerOffset
+ emitPutJITStubArg(regT2, 2); // argCount
}
void JIT::compileOpCallVarargs(Instruction* instruction)
@@ -239,19 +236,17 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
int argCount = instruction[3].u.operand;
int registerOffset = instruction[4].u.operand;
- Jump wasEval1;
- Jump wasEval2;
+ Jump wasEval;
if (opcodeID == op_call_eval) {
JITStubCall stubCall(this, cti_op_call_eval);
stubCall.addArgument(callee);
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
- wasEval1 = branchTest32(NonZero, regT0);
- wasEval2 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
}
- emitLoad(callee, regT1, regT2);
+ emitLoad(callee, regT1, regT0);
if (opcodeID == op_call)
compileOpCallSetupArgs(instruction);
@@ -259,12 +254,12 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
compileOpConstructSetupArgs(instruction);
emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitLoad(callee, regT1, regT2);
+ emitLoad(callee, regT1, regT0);
}
// Speculatively roll the callframe, assuming argCount will match the arity.
@@ -274,12 +269,10 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
- if (opcodeID == op_call_eval) {
- wasEval1.link(this);
- wasEval2.link(this);
- }
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
- emitStore(dst, regT1, regT0);;
+ emitStore(dst, regT1, regT0);
sampleCodeBlock(m_codeBlock);
}
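
The single wasEval branch works because cti_op_call_eval now signals "this was not actually eval" by returning the empty value, which the JSVALUE32_64 representation can identify from the tag word alone; the old code needed two branches to rule out both non-cell and cell results. A hedged sketch of the test on a two-word value:

#include <cstdint>

struct ValueParts { int32_t tag; int32_t payload; }; // JSVALUE32_64 layout (sketch)

static bool evalProducedResult(ValueParts v, int32_t emptyValueTag)
{
    return v.tag != emptyValueTag; // any other tag: the stub ran the eval, skip the call
}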
@@ -309,16 +302,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
int argCount = instruction[3].u.operand;
int registerOffset = instruction[4].u.operand;
- Jump wasEval1;
- Jump wasEval2;
+ Jump wasEval;
if (opcodeID == op_call_eval) {
JITStubCall stubCall(this, cti_op_call_eval);
stubCall.addArgument(callee);
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
- wasEval1 = branchTest32(NonZero, regT0);
- wasEval2 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
}
emitLoad(callee, regT1, regT0);
@@ -362,10 +353,8 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Call to the callee
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
- if (opcodeID == op_call_eval) {
- wasEval1.link(this);
- wasEval2.link(this);
- }
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
// Put the return value in dst. In the interpreter, op_ret does this.
emitStore(dst, regT1, regT0);
@@ -439,10 +428,10 @@ void JIT::compileOpCallInitializeCallFrame()
{
store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
- storePtr(regT2, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
+ storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
}
@@ -452,9 +441,9 @@ void JIT::compileOpCallSetupArgs(Instruction* instruction)
int registerOffset = instruction[4].u.operand;
// ecx holds func
- emitPutJITStubArg(regT2, 1);
- emitPutJITStubArgConstant(argCount, 3);
- emitPutJITStubArgConstant(registerOffset, 2);
+ emitPutJITStubArg(regT0, 0);
+ emitPutJITStubArgConstant(argCount, 2);
+ emitPutJITStubArgConstant(registerOffset, 1);
}
void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
@@ -462,10 +451,10 @@ void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
int registerOffset = instruction[4].u.operand;
// ecx holds func
+ emitPutJITStubArg(regT0, 0);
+ emitPutJITStubArg(regT1, 2);
+ addPtr(Imm32(registerOffset), regT1, regT2);
emitPutJITStubArg(regT2, 1);
- emitPutJITStubArg(regT1, 3);
- addPtr(Imm32(registerOffset), regT1, regT0);
- emitPutJITStubArg(regT0, 2);
}
void JIT::compileOpConstructSetupArgs(Instruction* instruction)
@@ -476,11 +465,11 @@ void JIT::compileOpConstructSetupArgs(Instruction* instruction)
int thisRegister = instruction[6].u.operand;
// ecx holds func
- emitPutJITStubArg(regT2, 1);
- emitPutJITStubArgConstant(registerOffset, 2);
- emitPutJITStubArgConstant(argCount, 3);
- emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
- emitPutJITStubArgConstant(thisRegister, 5);
+ emitPutJITStubArg(regT0, 0);
+ emitPutJITStubArgConstant(registerOffset, 1);
+ emitPutJITStubArgConstant(argCount, 2);
+ emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
+ emitPutJITStubArgConstant(thisRegister, 4);
}
void JIT::compileOpCallVarargs(Instruction* instruction)
@@ -490,20 +479,20 @@ void JIT::compileOpCallVarargs(Instruction* instruction)
int argCountRegister = instruction[3].u.operand;
emitGetVirtualRegister(argCountRegister, regT1);
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
compileOpCallVarargsSetupArgs(instruction);
// Check for JSFunctions.
- emitJumpSlowCaseIfNotJSCell(regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
-
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+
// Speculatively roll the callframe, assuming argCount will match the arity.
- mul32(Imm32(sizeof(Register)), regT0, regT0);
+ mul32(Imm32(sizeof(Register)), regT2, regT2);
intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
- addPtr(Imm32((int32_t)offset), regT0, regT3);
+ addPtr(Imm32((int32_t)offset), regT2, regT3);
addPtr(callFrameRegister, regT3);
storePtr(callFrameRegister, regT3);
- addPtr(regT0, callFrameRegister);
+ addPtr(regT2, callFrameRegister);
emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
// Put the return value in dst. In the interpreter, op_ret does this.
@@ -539,14 +528,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
Jump wasEval;
if (opcodeID == op_call_eval) {
JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee, regT2);
+ stubCall.addArgument(callee, regT0);
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
}
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
// The arguments have been set up on the hot path for op_call_eval
if (opcodeID == op_call)
compileOpCallSetupArgs(instruction);
@@ -554,13 +543,13 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
compileOpConstructSetupArgs(instruction);
// Check for JSFunctions.
- emitJumpSlowCaseIfNotJSCell(regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
}
// Speculatively roll the callframe, assuming argCount will match the arity.
@@ -606,7 +595,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
Jump wasEval;
if (opcodeID == op_call_eval) {
JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee, regT2);
+ stubCall.addArgument(callee, regT0);
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
@@ -615,9 +604,15 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
// This deliberately leaves the callee in ecx, used when setting up the stack frame below
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
DataLabelPtr addressOfLinkedFunctionCheck;
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT2, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
addSlowCase(jumpToSlow);
ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
@@ -629,18 +624,18 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
int proto = instruction[5].u.operand;
int thisRegister = instruction[6].u.operand;
- emitPutJITStubArg(regT2, 1);
- emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
+ emitPutJITStubArg(regT0, 0);
+ emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
JITStubCall stubCall(this, cti_op_construct_JSConstruct);
stubCall.call(thisRegister);
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
}
// Fast version of stack frame initialization, directly relative to edi.
// Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
- storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+ storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
@@ -674,13 +669,13 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
compileOpConstructSetupArgs(instruction);
// Fast check for JS function.
- Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT2);
- Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr));
+ Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT0);
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
}
// Speculatively roll the callframe, assuming argCount will match the arity.
@@ -688,6 +683,8 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
+ move(regT0, regT2);
+
m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
// Put the return value in dst.
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index b5aaafc..f26457a 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -37,28 +37,37 @@ namespace JSC {
// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
- poke(src, argumentNumber);
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ poke(src, argumentStackOffset);
}
/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
- poke(Imm32(value), argumentNumber);
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ poke(Imm32(value), argumentStackOffset);
}
/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
- poke(ImmPtr(value), argumentNumber);
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ poke(ImmPtr(value), argumentStackOffset);
}
/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
- peek(dst, argumentNumber);
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ peek(dst, argumentStackOffset);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}
ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
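
The stub-argument helpers above change their indexing scheme: arguments are now zero-based, scaled by sizeof(JSValue)/sizeof(void*) so each argument spans a full JSValue of machine words (two on 32-bit JSVALUE32_64 builds, one on 64-bit), plus one skipped slot at the stack pointer (assumed reserved at the base of JITStackFrame). A sketch of the offset computation:

#include <cstddef>

static std::size_t stubArgStackOffset(unsigned argumentNumber,
                                      std::size_t jsValueSize, std::size_t wordSize)
{
    return argumentNumber * (jsValueSize / wordSize) + 1; // words per JSValue, skipping slot 0
}

// e.g. on a 32-bit build, emitPutJITStubArg(regT1, regT0, 0) fills word slots 1 and 2
// (assumption: tag/payload ordering per the JITStubCall convention).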
@@ -102,38 +111,71 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
return nakedCall;
}
-#if PLATFORM(X86) || PLATFORM(X86_64) || (PLATFORM(ARM) && !PLATFORM_ARM_ARCH(7))
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+
+ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
+{
+#if PLATFORM(ARM_TRADITIONAL)
+#ifndef NDEBUG
+ // Ensure the label after the sequence can also fit
+ insnSpace += sizeof(ARMWord);
+ constSpace += sizeof(uint64_t);
+#endif
+
+ ensureSpace(insnSpace, constSpace);
+
+#endif
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+#ifndef NDEBUG
+ m_uninterruptedInstructionSequenceBegin = label();
+ m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
+#endif
+#endif
+}
+
+ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
+{
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
+ ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
+#endif
+}
+
+#endif
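
These helpers bracket spans whose exact byte length is asserted (so later patching stays valid) and, on traditional ARM, reserve space up front so the assembler cannot flush its constant pool mid-sequence. The BEGIN/END_UNINTERRUPTED_SEQUENCE macros used in the JITPropertyAccess.cpp hunks below presumably expand to this pair with per-sequence size constants:

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
    // ... emit the patchable structure check and property load ...
    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
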
+
+#if PLATFORM(ARM_THUMB2)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
- pop(reg);
+ move(linkRegister, reg);
}
ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
- push(reg);
+ move(reg, linkRegister);
}
ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
- push(address);
+ loadPtr(address, linkRegister);
}
-#elif PLATFORM_ARM_ARCH(7)
+#else // PLATFORM(X86) || PLATFORM(X86_64) || PLATFORM(ARM_TRADITIONAL)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
- move(linkRegister, reg);
+ pop(reg);
}
ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
- move(reg, linkRegister);
+ push(reg);
}
ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
- loadPtr(address, linkRegister);
+ push(address);
}
#endif
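
The two arms swapped places, but the substance is where each target keeps its return address; sketched as pseudo-assembly (illustrative only):

    ; x86 / x86-64 / traditional ARM: the return address lives on the stack
    ;   preserveReturnAddressAfterCall:   pop  reg      ; grab it after the call
    ;   restoreReturnAddressBeforeReturn: push reg      ; ret will pop it again
    ;
    ; ARM Thumb-2: the return address lives in the link register
    ;   preserveReturnAddressAfterCall:   mov  reg, lr
    ;   restoreReturnAddressBeforeReturn: mov  lr, reg  ; bx lr returns through it
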
@@ -149,8 +191,8 @@ ALWAYS_INLINE void JIT::restoreArgumentReference()
{
move(stackPointerRegister, firstArgumentRegister);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
-#if PLATFORM(ARM) && !PLATFORM_ARM_ARCH(7)
- move(ctiReturnRegister, ARM::lr);
+#if PLATFORM(ARM_TRADITIONAL)
+ move(ctiReturnRegister, ARMRegisters::lr);
#endif
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
@@ -158,7 +200,7 @@ ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
#if PLATFORM(X86)
// Within a trampoline the return address will be on the stack at this point.
addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif PLATFORM_ARM_ARCH(7)
+#elif PLATFORM(ARM_THUMB2)
move(stackPointerRegister, firstArgumentRegister);
#endif
// In the trampoline on x86-64, the first argument register is not overwritten.
@@ -242,8 +284,8 @@ ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t cou
#if PLATFORM(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
- move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
+ move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
@@ -257,8 +299,8 @@ ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostF
#if PLATFORM(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
- move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
- storePtr(ImmPtr(codeBlock), X86::ecx);
+ move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
+ storePtr(ImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
@@ -268,6 +310,11 @@ ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
#endif
#endif
+inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
+{
+ return Address(base, (index * sizeof(Register)));
+}
+
#if USE(JSVALUE32_64)
inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
@@ -280,11 +327,6 @@ inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
}
-inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
-{
- return Address(base, (index * sizeof(Register)));
-}
-
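
addressFor/tagFor/payloadFor are fixed offsets into 8-byte Register slots; a self-contained sketch of the layout they assume (struct and offsets illustrative — the real ones come from OBJECT_OFFSETOF on JSValue, and the ordering shown is the little-endian one):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct SketchRegister {      // stands in for JSC's 8-byte Register
        int32_t payload;         // payloadFor(i) = base + i * 8 + 0
        int32_t tag;             // tagFor(i)     = base + i * 8 + 4
    };

    int main()
    {
        static_assert(sizeof(SketchRegister) == 8, "one slot per virtual register");
        SketchRegister frame[4] = {};
        frame[2].payload = 42;   // store32(..., payloadFor(2))
        frame[2].tag = -1;       // store32(..., tagFor(2))
        assert(offsetof(SketchRegister, tag) == 4);
        return 0;
    }
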
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
RegisterID mappedTag;
@@ -542,23 +584,28 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op
return false;
}
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
+/* Deprecated: Please use JITStubCall instead. */
+
+ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
{
- return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ poke(payload, argumentStackOffset);
+ poke(tag, argumentStackOffset + 1);
}
/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
{
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue constant = m_codeBlock->getConstant(src);
- poke(Imm32(constant.payload()), argumentNumber);
- poke(Imm32(constant.tag()), argumentNumber + 1);
+ poke(Imm32(constant.payload()), argumentStackOffset);
+ poke(Imm32(constant.tag()), argumentStackOffset + 1);
} else {
emitLoad(src, scratch1, scratch2);
- poke(scratch2, argumentNumber);
- poke(scratch1, argumentNumber + 1);
+ poke(scratch2, argumentStackOffset);
+ poke(scratch1, argumentStackOffset + 1);
}
}
@@ -685,6 +732,24 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
return branchTestPtr(Zero, reg, tagTypeNumberRegister);
}
+
+inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ Register& inConstantPool = m_codeBlock->constantRegister(index);
+ loadDouble(&inConstantPool, value);
+ } else
+ loadDouble(addressFor(index), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ Register& inConstantPool = m_codeBlock->constantRegister(index);
+ convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value);
+ } else
+ convertInt32ToDouble(addressFor(index), value);
+}
#endif
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
@@ -722,6 +787,11 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1,
addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotImmediateNumber(reg));
+}
+
#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
@@ -779,12 +849,13 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue value = m_codeBlock->getConstant(src);
- emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
+ poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
} else {
loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
- emitPutJITStubArg(scratch, argumentNumber);
+ poke(scratch, argumentStackOffset);
}
killLastResultRegister();
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 13fc981..f362d75 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -64,90 +64,77 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
#if ENABLE(JIT_OPTIMIZE_CALL)
- /* VirtualCallLink Trampoline */
+ // VirtualCallLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallLinkBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- // regT0 holds callee, regT1 holds argCount.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
- Jump hasCodeBlock2 = branchTestPtr(NonZero, regT2);
+ Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
- // Lazily generate a CodeBlock.
- preserveReturnAddressAfterCall(regT3); // return address
+ Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callJSFunction2 = call();
- move(regT0, regT2);
- emitGetJITStubArg(1, regT0); // callee
- emitGetJITStubArg(5, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3); // return address
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
hasCodeBlock2.link(this);
- // regT2 holds codeBlock.
- Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
// Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 3); // return address
- emitPutJITStubArg(regT2, 7); // codeBlock
+ emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
Call callArityCheck2 = call();
move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT0); // callee
- emitGetJITStubArg(5, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3); // return address
-
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
arityCheckOkay2.link(this);
+
isNativeFunc2.link(this);
compileOpCallInitializeCallFrame();
preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 3);
+ emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
Call callLazyLinkCall = call();
restoreReturnAddressBeforeReturn(regT3);
jump(regT0);
#endif // ENABLE(JIT_OPTIMIZE_CALL)
- /* VirtualCall Trampoline */
+ // VirtualCall Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- // regT0 holds callee, regT1 holds argCount.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
- Jump hasCodeBlock3 = branchTestPtr(NonZero, regT2);
+ Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
- // Lazily generate a CodeBlock.
- preserveReturnAddressAfterCall(regT3); // return address
+ Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callJSFunction1 = call();
- move(regT0, regT2);
- emitGetJITStubArg(1, regT0); // callee
- emitGetJITStubArg(5, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3); // return address
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
hasCodeBlock3.link(this);
- // regT2 holds codeBlock.
- Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
- // Check argCount matches callee.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 3); // return address
- emitPutJITStubArg(regT2, 7); // codeBlock
+ emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
Call callArityCheck1 = call();
move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT0); // callee
- emitGetJITStubArg(5, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3); // return address
-
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
arityCheckOkay3.link(this);
+
isNativeFunc3.link(this);
+
compileOpCallInitializeCallFrame();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
jump(regT0);
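
Both rewritten trampolines key off FunctionExecutable::m_numParameters: judging by the branches, zero marks a host (native) function, a positive value means JIT code already exists and gives the expected arity, and anything else means code must be generated lazily. Paraphrased as C++ (names like compileViaStub are hypothetical, not JSC calls):

    int n = executable->m_numParameters;    // executable was loaded into regT2
    if (n != 0) {                           // 0 => native function, skip all checks
        if (n < 0) {                        // no code yet: generate it lazily
            compileViaStub();               // the callJSFunction stub call
            n = reloadExecutable()->m_numParameters;
        }
        if (n != argCount)                  // arity mismatch (argCount in regT1)
            fixUpArityViaStub();            // the callArityCheck stub; rolls the frame
    }
    compileOpCallInitializeCallFrame();
    jumpTo(executable->m_jitCode);          // VirtualCall; VirtualCallLink instead
                                            // calls the lazy-link stub and jumps to
                                            // whatever code pointer it returns
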
#if PLATFORM(X86)
@@ -237,23 +224,23 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#if COMPILER(MSVC) || PLATFORM(LINUX)
// ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
// Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
- storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
+ storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
// Plant callframe
- move(callFrameRegister, X86::edx);
+ move(callFrameRegister, X86Registers::edx);
- call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+ call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
// JSValue is a non-POD type, so eax points to it
- emitLoad(0, regT1, regT0, X86::eax);
+ emitLoad(0, regT1, regT0, X86Registers::eax);
#else
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx); // callee
- move(callFrameRegister, X86::ecx); // callFrame
- call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx); // callee
+ move(callFrameRegister, X86Registers::ecx); // callFrame
+ call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
#endif
// We've put a few temporaries on the stack in addition to the actual arguments
@@ -261,10 +248,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
// Check for an exception
- // FIXME: Maybe we can optimize this comparison to JSValue().
move(ImmPtr(&globalData->exception), regT2);
- Jump sawException1 = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::CellTag));
- Jump sawException2 = branch32(NonZero, payloadFor(0, regT2), Imm32(0));
+ Jump sawException = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::EmptyValueTag));
// Grab the return address.
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);
@@ -277,8 +262,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
ret();
// Handle an exception
- sawException1.link(this);
- sawException2.link(this);
+ sawException.link(this);
// Grab the return address.
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
move(ImmPtr(&globalData->exceptionLocation), regT2);
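
Collapsing the two exception branches into one works because, in the JSVALUE32_64 encoding, an empty JSValue() carries EmptyValueTag, so "no exception pending" becomes a single 32-bit tag compare. A self-contained sketch (the tag values are placeholders, not the real encoding):

    #include <cassert>

    enum Tag { EmptyValueTag = -6, CellTag = -2 };   // placeholder values
    struct ValueSlot { int payload; Tag tag; };

    static bool exceptionPending(const ValueSlot& slot)
    {
        return slot.tag != EmptyValueTag;            // one compare replaces the old
    }                                                // CellTag-plus-payload pair

    int main()
    {
        ValueSlot none = { 0, EmptyValueTag };
        ValueSlot thrown = { 1, CellTag };
        assert(!exceptionPending(none) && exceptionPending(thrown));
        return 0;
    }
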
@@ -544,7 +528,7 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas
void JIT::emit_op_new_func(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -807,14 +791,17 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
addJump(jump(), target + 2);
- isNotInteger.link(this);
+ if (supportsFloatingPoint()) {
+ isNotInteger.link(this);
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
+ addJump(branchDouble(DoubleEqual, fpRegT0, fpRegT1), target + 2);
+ } else
+ addSlowCase(isNotInteger);
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleEqual, fpRegT0, fpRegT1), target + 2);
-
isTrue.link(this);
isTrue2.link(this);
}
@@ -845,14 +832,17 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
addJump(jump(), target + 2);
- isNotInteger.link(this);
+ if (supportsFloatingPoint()) {
+ isNotInteger.link(this);
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
+ addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target + 2);
+ } else
+ addSlowCase(isNotInteger);
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target + 2);
-
isFalse.link(this);
isFalse2.link(this);
}
@@ -1180,7 +1170,7 @@ void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1244,7 +1234,7 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::DeletedValueTag)));
+ addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
isInt32.link(this);
if (src != dst)
@@ -1388,8 +1378,7 @@ void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
void JIT::emit_op_create_arguments(Instruction*)
{
- Jump argsNotCell = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::CellTag));
- Jump argsNotNull = branchTestPtr(NonZero, payloadFor(RegisterFile::ArgumentsRegister, callFrameRegister));
+ Jump argsCreated = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::EmptyValueTag));
// If we get here the arguments slot still holds the empty value - i.e. the arguments object needs lazy creation.
if (m_codeBlock->m_numParameters == 1)
@@ -1397,8 +1386,7 @@ void JIT::emit_op_create_arguments(Instruction*)
else
JITStubCall(this, cti_op_create_arguments).call();
- argsNotCell.link(this);
- argsNotNull.link(this);
+ argsCreated.link(this);
}
void JIT::emit_op_init_arguments(Instruction*)
@@ -1484,85 +1472,77 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
+ // VirtualCallLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallLinkBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
+ Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+
+ Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callJSFunction2 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
hasCodeBlock2.link(this);
- Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
// Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
+ emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
Call callArityCheck2 = call();
move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
+ emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
arityCheckOkay2.link(this);
+
isNativeFunc2.link(this);
compileOpCallInitializeCallFrame();
-
preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
Call callLazyLinkCall = call();
restoreReturnAddressBeforeReturn(regT3);
-
jump(regT0);
+ // VirtualCall Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
+ Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+
+ Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callJSFunction1 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
+ emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
hasCodeBlock3.link(this);
- Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
// Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
+ emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
Call callArityCheck1 = call();
move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
+ emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
arityCheckOkay3.link(this);
+
isNativeFunc3.link(this);
- // load ctiCode from the new codeBlock.
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
-
compileOpCallInitializeCallFrame();
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
jump(regT0);
-
Label nativeCallThunk = align();
preserveReturnAddressAfterCall(regT0);
emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
@@ -1575,39 +1555,39 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#if PLATFORM(X86_64)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86Registers::ecx);
// Allocate stack space for our arglist
subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_be_16byte_aligned);
// Set up arguments
- subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
+ subPtr(Imm32(1), X86Registers::ecx); // Don't include 'this' in argcount
// Push argcount
- storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+ storePtr(X86Registers::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
// Calculate the start of the callframe header, and store in edx
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86Registers::edx);
// Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
- mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
- subPtr(X86::ecx, X86::edx);
+ mul32(Imm32(sizeof(Register)), X86Registers::ecx, X86Registers::ecx);
+ subPtr(X86Registers::ecx, X86Registers::edx);
// push pointer to arguments
- storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+ storePtr(X86Registers::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
// ArgList is passed by reference so is stackPointerRegister
- move(stackPointerRegister, X86::ecx);
+ move(stackPointerRegister, X86Registers::ecx);
// edx currently points to the first argument, edx-sizeof(Register) points to 'this'
- loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);
+ loadPtr(Address(X86Registers::edx, -(int32_t)sizeof(Register)), X86Registers::edx);
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
- move(callFrameRegister, X86::edi);
+ move(callFrameRegister, X86Registers::edi);
- call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
+ call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
#elif PLATFORM(X86)
@@ -1676,33 +1656,33 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#if COMPILER(MSVC) || PLATFORM(LINUX)
// ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
// Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
- storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
+ storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
// Plant callframe
- move(callFrameRegister, X86::edx);
+ move(callFrameRegister, X86Registers::edx);
- call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+ call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
// JSValue is a non-POD type
- loadPtr(Address(X86::eax), X86::eax);
+ loadPtr(Address(X86Registers::eax), X86Registers::eax);
#else
// Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx);
// Plant callframe
- move(callFrameRegister, X86::ecx);
- call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+ move(callFrameRegister, X86Registers::ecx);
+ call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
#endif
// We've put a few temporaries on the stack in addition to the actual arguments
// so pull them off now
addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-#elif PLATFORM(ARM) && !PLATFORM_ARM_ARCH(7)
+#elif PLATFORM(ARM_TRADITIONAL)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
// Allocate stack space for our arglist
@@ -1736,9 +1716,9 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
move(callFrameRegister, regT0);
// Set up arg4: this is a plain hack
- move(stackPointerRegister, ARM::S0);
+ move(stackPointerRegister, ARMRegisters::S0);
- move(ctiReturnRegister, ARM::lr);
+ move(ctiReturnRegister, ARMRegisters::lr);
call(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_data)));
addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
@@ -1971,7 +1951,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
void JIT::emit_op_new_func(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -2325,7 +2305,7 @@ void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -2711,32 +2691,20 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- // The slow case that handles accesses to arrays (below) may jump back up to here.
- Label beginGetByValSlow(this);
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
- Jump notImm = getSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitFastArithIntToImmNoCheck(regT1, regT1);
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
- notImm.link(this);
JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
- // This is the slow case that handles accesses to arrays above the fast cut-off.
- // First, check if this is an access to the vector
- linkSlowCase(iter);
- branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow);
-
- // okay, missed the fast region, but it is still in the vector. Get the value.
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2);
- // Check whether the value loaded is zero; if so we need to return undefined.
- branchTestPtr(Zero, regT2, beginGetByValSlow);
- move(regT2, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
}
void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -2793,30 +2761,20 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- // Normal slow cases - either is not an immediate imm, or is an array.
- Jump notImm = getSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitFastArithIntToImmNoCheck(regT1, regT1);
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
- notImm.link(this); {
- JITStubCall stubCall(this, cti_op_put_by_val);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call();
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
- }
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
- // slow cases for immediate int accesses to arrays
- linkSlowCase(iter);
- linkSlowCase(iter); {
- JITStubCall stubCall(this, cti_op_put_by_val_array);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call();
- }
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(regT0);
+ stubPutByValCall.addArgument(property, regT2);
+ stubPutByValCall.addArgument(value, regT2);
+ stubPutByValCall.call();
}
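
These rewritten slow cases depend on linkSlowCase consuming entries in exactly the order the fast path's addSlowCase calls emitted them, which is why the four links above mirror the four checks in emit_op_put_by_val. A standalone analogue of that FIFO contract (names illustrative):

    #include <cassert>
    #include <deque>
    #include <string>

    int main()
    {
        // Fast path: addSlowCase(...) appends one entry per guard, in order.
        std::deque<std::string> slowCases =
            { "property int32", "base cell", "base array", "vector bounds" };

        // Slow path: each linkSlowCase(iter) must consume the matching entry.
        for (const char* expected : { "property int32", "base cell",
                                      "base array", "vector bounds" }) {
            assert(slowCases.front() == expected);
            slowCases.pop_front();
        }
        return 0;
    }
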
void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index 9dba2e2..9edfd01 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -273,11 +273,14 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
- load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
- load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
+
emitStore(dst, regT1, regT0);
map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}
@@ -288,35 +291,16 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
unsigned base = currentInstruction[2].u.operand;
unsigned property = currentInstruction[3].u.operand;
- // The slow case that handles accesses to arrays (below) may jump back up to here.
- Label callGetByValJITStub(this);
-
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base array check
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
JITStubCall stubCall(this, cti_op_get_by_val);
stubCall.addArgument(base);
stubCall.addArgument(property);
stubCall.call(dst);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
- linkSlowCase(iter); // array fast cut-off check
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
- branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);
-
- // Missed the fast region, but it is still in the vector.
- load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
- load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
-
- // FIXME: Maybe we can optimize this comparison to JSValue().
- Jump skip = branch32(NotEqual, regT0, Imm32(0));
- branch32(Equal, regT1, Imm32(JSValue::CellTag), callGetByValJITStub);
-
- skip.link(this);
- emitStore(dst, regT1, regT0);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
@@ -330,24 +314,27 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
-
- Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
- // Check if the access is within the vector.
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
-
- // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
- // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
- Jump skip = branch32(NotEqual, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::CellTag));
- addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), Imm32(0)));
- skip.link(this);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
- inFastVector.link(this);
+ Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
+ Label storeResult(this);
emitLoad(value, regT1, regT0);
store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+ Jump end = jump();
+
+ empty.link(this);
+ add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ add32(Imm32(1), regT2, regT0);
+ store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
}
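
The new fast path distinguishes overwriting an existing element from filling a hole: a hole write bumps m_numValuesInVector and, if the index is at or beyond m_length, grows m_length to index + 1 before storing. A standalone sketch of that logic (field names mirror the diff; a zero word stands in for the empty-value check, which the 32- and 64-bit encodings perform differently):

    #include <cassert>
    #include <cstdint>

    struct MiniArrayStorage {
        uint32_t m_length;              // the JS-visible "length"
        uint32_t m_numValuesInVector;
        intptr_t m_vector[8];           // zero encodes a hole in this sketch
    };

    static void putByValFastPath(MiniArrayStorage& s, uint32_t index, intptr_t value)
    {
        assert(index < 8 && value != 0);       // the vector-length guard ran already
        if (!s.m_vector[index]) {              // the "empty" branch: filling a hole
            ++s.m_numValuesInVector;
            if (index >= s.m_length)           // hole past the current length:
                s.m_length = index + 1;        // grow length before storing
        }
        s.m_vector[index] = value;             // storeResult
    }

    int main()
    {
        MiniArrayStorage s = {};
        putByValFastPath(s, 3, 7);
        assert(s.m_length == 4 && s.m_numValuesInVector == 1);
        putByValFastPath(s, 3, 9);             // overwrite: counters unchanged
        assert(s.m_length == 4 && s.m_numValuesInVector == 1);
        return 0;
    }
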
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -359,24 +346,13 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
JITStubCall stubPutByValCall(this, cti_op_put_by_val);
stubPutByValCall.addArgument(base);
stubPutByValCall.addArgument(property);
stubPutByValCall.addArgument(value);
stubPutByValCall.call();
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
- // Slow cases for immediate int accesses to arrays.
- linkSlowCase(iter); // in vector check
- linkSlowCase(iter); // written to slot check
-
- JITStubCall stubCall(this, cti_op_put_by_val_array);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(regT2);
- stubCall.addArgument(value);
- stubCall.call();
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
@@ -958,12 +934,16 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
// This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
- // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
- // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
+ // We check the value as if it were a uint32 against m_vectorLength - which will always fail if
+ // the number was signed, since m_vectorLength is always less than intmax (since the total allocation
// size is always less than 4GB). As such zero extending will have been correct (and extending the value
// to 64 bits is necessary since it's used in the address calculation). We zero extend rather than sign
// extend since it makes it easier to re-tag the value in the slow case.
@@ -971,21 +951,25 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
#else
emitFastArithImmToInt(regT1);
#endif
- emitJumpSlowCaseIfNotJSCell(regT0);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
- // Get the value from the vector
loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ addSlowCase(branchTestPtr(Zero, regT0));
+
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
- emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
// See comment in op_get_by_val.
@@ -993,23 +977,29 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
#else
emitFastArithImmToInt(regT1);
#endif
- emitJumpSlowCaseIfNotJSCell(regT0);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
- // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
- // No; oh well, check if the access is within the vector - if so, we may still be okay.
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
- // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
- // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
- addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
+ Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
- // All good - put the value into the array.
- inFastVector.link(this);
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ Label storeResult(this);
+ emitGetVirtualRegister(value, regT0);
storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ Jump end = jump();
+
+ empty.link(this);
+ add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ move(regT1, regT0);
+ add32(Imm32(1), regT0);
+ store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
}
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
@@ -1122,13 +1112,20 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
// Do the method check - check the object & its prototype's structure inline (this is the common case).
m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
Jump notCell = emitJumpIfNotJSCell(regT0);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
// This will be relinked to load the function without doing a load.
DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
Jump match = jump();
ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
@@ -1192,6 +1189,8 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
@@ -1210,6 +1209,9 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
Label putResult(this);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
}
@@ -1233,6 +1235,8 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
#ifndef NDEBUG
Label coldPathBegin(this);
#endif
@@ -1241,6 +1245,8 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
stubCall.addArgument(ImmPtr(ident));
Call call = stubCall.call(resultVReg);
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
// Track the location of the call; this will be used to recover patch information.
@@ -1264,6 +1270,8 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
// Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
@@ -1279,6 +1287,9 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
}
@@ -1382,7 +1393,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
stubCall.call(regT0);
- emitGetJITStubArg(3, regT1);
+ emitGetJITStubArg(2, regT1);
restoreReturnAddressBeforeReturn(regT3);
}
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index d563f58..c7257af 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -69,6 +69,18 @@ namespace JSC {
#define SYMBOL_STRING(name) #name
#endif
+#if PLATFORM(IPHONE)
+#define THUMB_FUNC_PARAM(name) SYMBOL_STRING(name)
+#else
+#define THUMB_FUNC_PARAM(name)
+#endif
+
+#if PLATFORM(LINUX) && PLATFORM(X86_64)
+#define SYMBOL_STRING_RELOCATION(name) #name "@plt"
+#else
+#define SYMBOL_STRING_RELOCATION(name) SYMBOL_STRING(name)
+#endif
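
SYMBOL_STRING_RELOCATION exists because position-independent code on Linux/x86-64 has to reach global functions through the PLT; everywhere else the plain symbol name suffices. The intended expansion, using cti_vm_throw from the hunks below:

    // "call " SYMBOL_STRING_RELOCATION(cti_vm_throw)
    //   Linux/x86-64:  call cti_vm_throw@plt   (PIC-safe, via the PLT)
    //   elsewhere:     call cti_vm_throw       (whatever SYMBOL_STRING yields;
    //                                           some platforms prefix an underscore)
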
+
#if USE(JSVALUE32_64)
#if COMPILER(GCC) && PLATFORM(X86)
@@ -106,7 +118,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
#endif
- "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addl $0x3c, %esp" "\n"
"popl %ebx" "\n"
"popl %edi" "\n"
@@ -169,7 +181,7 @@ asm volatile (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"movq %rsp, %rdi" "\n"
- "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addq $0x48, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
@@ -193,7 +205,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM_ARM_ARCH(7)
+#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
@@ -204,7 +216,7 @@ asm volatile (
".align 2" "\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiTrampoline) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
"sub sp, sp, #0x3c" "\n"
"str lr, [sp, #0x20]" "\n"
@@ -230,10 +242,10 @@ asm volatile (
".align 2" "\n"
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"cpy r0, sp" "\n"
- "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+ "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"ldr r6, [sp, #0x2c]" "\n"
"ldr r5, [sp, #0x28]" "\n"
"ldr r4, [sp, #0x24]" "\n"
@@ -247,7 +259,7 @@ asm volatile (
".align 2" "\n"
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ldr r6, [sp, #0x2c]" "\n"
"ldr r5, [sp, #0x28]" "\n"
@@ -359,7 +371,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
#endif
- "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addl $0x1c, %esp" "\n"
"popl %ebx" "\n"
"popl %edi" "\n"
@@ -428,7 +440,7 @@ asm volatile (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"movq %rsp, %rdi" "\n"
- "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addq $0x78, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
@@ -452,7 +464,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM_ARM_ARCH(7)
+#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
@@ -463,9 +475,9 @@ asm volatile (
".align 2" "\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiTrampoline) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "sub sp, sp, #0x3c" "\n"
+ "sub sp, sp, #0x40" "\n"
"str lr, [sp, #0x20]" "\n"
"str r4, [sp, #0x24]" "\n"
"str r5, [sp, #0x28]" "\n"
@@ -480,7 +492,7 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ldr r5, [sp, #0x28]" "\n"
"ldr r4, [sp, #0x24]" "\n"
"ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
+ "add sp, sp, #0x40" "\n"
"bx lr" "\n"
);
@@ -489,15 +501,15 @@ asm volatile (
".align 2" "\n"
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"cpy r0, sp" "\n"
- "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+ "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"ldr r6, [sp, #0x2c]" "\n"
"ldr r5, [sp, #0x28]" "\n"
"ldr r4, [sp, #0x24]" "\n"
"ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
+ "add sp, sp, #0x40" "\n"
"bx lr" "\n"
);
@@ -506,7 +518,7 @@ asm volatile (
".align 2" "\n"
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ldr r6, [sp, #0x2c]" "\n"
"ldr r5, [sp, #0x28]" "\n"
@@ -516,7 +528,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"bx lr" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM)
+#elif COMPILER(GCC) && PLATFORM(ARM_TRADITIONAL)
asm volatile (
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
@@ -548,7 +560,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"mov lr, r6" "\n"
"add r8, pc, #4" "\n"
"str r8, [sp, #-4]!" "\n"
- "b " SYMBOL_STRING(cti_vm_throw) "\n"
+ "b " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
// Both have the same return sequence
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
@@ -636,7 +648,7 @@ JITThunks::JITThunks(JSGlobalData* globalData)
{
JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
-#if PLATFORM_ARM_ARCH(7)
+#if PLATFORM(ARM_THUMB2)
// Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
// and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
// macros.
@@ -649,7 +661,7 @@ JITThunks::JITThunks(JSGlobalData* globalData)
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == 0x34);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == 0x38);
// The fifth argument is the first item already on the stack.
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == 0x3c);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == 0x40);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == 0x1C);
#endif
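
The frame-size bump from 0x3c to 0x40 (here and in the Thumb-2 trampolines above) makes room for enabledProfilerReference at 0x40 and, presumably, also keeps sp 8-byte aligned across the stub call as the ARM EABI requires — 0x3c leaves sp misaligned by 4:

    static_assert(0x40 % 8 == 0, "Thumb-2 stub frame keeps sp 8-byte aligned");
    // 0x3c % 8 == 4: the old frame size broke that alignment after the sub.
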
@@ -673,7 +685,7 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
JSCell* baseCell = asCell(baseValue);
Structure* structure = baseCell->structure();
- if (structure->isDictionary()) {
+ if (structure->isUncacheableDictionary()) {
ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
@@ -689,7 +701,7 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
// Structure transition, cache transition info
if (slot.type() == PutPropertySlot::NewProperty) {
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- if (!prototypeChain->isCacheable()) {
+ if (!prototypeChain->isCacheable() || structure->isDictionary()) {
ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
@@ -737,7 +749,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
JSCell* baseCell = asCell(baseValue);
Structure* structure = baseCell->structure();
- if (structure->isDictionary()) {
+ if (structure->isUncacheableDictionary()) {
ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
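
A substitution that recurs through these property-access hunks: plain isDictionary() checks become isUncacheableDictionary(). The apparent intent is that dictionary structures no longer block caching outright; only uncacheable dictionaries, whose property offsets may have been reused, force the generic stub, while new-property transitions still refuse any dictionary. A condensed sketch of the resulting put-by-id guard, reusing the names from the hunks:

    // Sketch, not actual WebKit code: the shape of the caching decision
    // after this patch.
    static bool canCachePutByID(Structure* structure, StructureChain* prototypeChain, bool isNewProperty)
    {
        if (structure->isUncacheableDictionary())
            return false; // property offsets unstable: always take the generic path
        if (isNewProperty && (!prototypeChain->isCacheable() || structure->isDictionary()))
            return false; // transitions out of dictionaries stay generic
        return true;
    }
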
@@ -876,7 +888,7 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
} \
} while (0)
-#if PLATFORM_ARM_ARCH(7)
+#if PLATFORM(ARM_THUMB2)
#define DEFINE_STUB_FUNCTION(rtype, op) \
extern "C" { \
@@ -887,7 +899,7 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
".align 2" "\n" \
".globl " SYMBOL_STRING(cti_##op) "\n" \
".thumb" "\n" \
- ".thumb_func " SYMBOL_STRING(cti_##op) "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(cti_##op) "\n" \
SYMBOL_STRING(cti_##op) ":" "\n" \
"str lr, [sp, #0x1c]" "\n" \
"bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
@@ -1148,7 +1160,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
JSObject* slotBaseObject;
if (baseValue.isCell()
&& slot.isCacheable()
- && !(structure = asCell(baseValue)->structure())->isDictionary()
+ && !(structure = asCell(baseValue)->structure())->isUncacheableDictionary()
&& (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
&& specific
) {
@@ -1176,7 +1188,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
// for now. For now it performs a check on a special object on the global object, used only for this
// purpose. The object is in no way exposed, and as such the check will always pass.
if (slot.slotBase() == baseValue) {
- JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject()->methodCallDummy(), STUB_RETURN_ADDRESS);
+ JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS);
return JSValue::encode(result);
}
}
@@ -1222,7 +1234,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
if (baseValue.isCell()
&& slot.isCacheable()
- && !asCell(baseValue)->structure()->isDictionary()
+ && !asCell(baseValue)->structure()->isUncacheableDictionary()
&& slot.slotBase() == baseValue) {
CodeBlock* codeBlock = callFrame->codeBlock();
@@ -1293,7 +1305,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
CHECK_FOR_EXCEPTION();
- if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
+ if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isUncacheableDictionary()) {
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
return JSValue::encode(result);
}
@@ -1467,7 +1479,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return stackFrame.args[0].funcDeclNode()->makeFunction(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
+ return stackFrame.args[0].function()->make(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
}
DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
@@ -1481,11 +1493,11 @@ DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
JSFunction* function = asFunction(stackFrame.args[0].jsValue());
ASSERT(!function->isHostFunction());
- FunctionBodyNode* body = function->body();
+ FunctionExecutable* executable = function->jsExecutable();
ScopeChainNode* callDataScopeChain = function->scope().node();
- body->jitCode(callDataScopeChain);
+ executable->jitCode(stackFrame.callFrame, callDataScopeChain);
- return &(body->generatedBytecode());
+ return function;
}
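
The call-stub hunks share one theme: the AST-level FunctionBodyNode gives way to the new Executable classes, and the stubs now hand the trampoline the callee JSFunction instead of a CodeBlock, leaving it to re-derive the code from the function. A rough sketch of the relationship these call sites assume (the real declarations are in runtime/Executable.h and differ in detail):

    // Only the members exercised by the hunks here are shown.
    class ExecutableBase {
    public:
        bool isHostFunction() const;
        JITCode& generatedJITCode();
    };

    class FunctionExecutable : public ExecutableBase {
    public:
        CodeBlock& bytecode(CallFrame*, ScopeChainNode*);  // compiles on demand
        CodeBlock& generatedBytecode();                    // assumes already compiled
        JITCode& jitCode(CallFrame*, ScopeChainNode*);     // generates JIT code on demand
        int parameterCount() const;
    };
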
DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
@@ -1493,8 +1505,9 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* newCodeBlock = stackFrame.args[3].codeBlock();
- ASSERT(newCodeBlock->codeType() != NativeCode);
+ JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+ ASSERT(!callee->isHostFunction());
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecode();
int argCount = stackFrame.args[2].int32();
ASSERT(argCount != newCodeBlock->m_numParameters);
@@ -1531,7 +1544,7 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
callFrame->setCallerFrame(oldCallFrame);
}
- RETURN_POINTER_PAIR(newCodeBlock, callFrame);
+ RETURN_POINTER_PAIR(callee, callFrame);
}
#if ENABLE(JIT_OPTIMIZE_CALL)
@@ -1539,13 +1552,12 @@ DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
{
STUB_INIT_STACK_FRAME(stackFrame);
JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
- JITCode& jitCode = callee->body()->generatedJITCode();
+ ExecutableBase* executable = callee->executable();
+ JITCode& jitCode = executable->generatedJITCode();
CodeBlock* codeBlock = 0;
- if (!callee->isHostFunction())
- codeBlock = &callee->body()->bytecode(callee->scope().node());
- else
- codeBlock = &callee->body()->generatedBytecode();
+ if (!executable->isHostFunction())
+ codeBlock = &static_cast<FunctionExecutable*>(executable)->bytecode(stackFrame.callFrame, callee->scope().node());
CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
if (!callLinkInfo->seenOnce())
@@ -1561,7 +1573,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
{
STUB_INIT_STACK_FRAME(stackFrame);
- JSActivation* activation = new (stackFrame.globalData) JSActivation(stackFrame.callFrame, static_cast<FunctionBodyNode*>(stackFrame.callFrame->codeBlock()->ownerNode()));
+ JSActivation* activation = new (stackFrame.globalData) JSActivation(stackFrame.callFrame, static_cast<FunctionExecutable*>(stackFrame.callFrame->codeBlock()->ownerExecutable()));
stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->copy()->push(activation));
return activation;
}
@@ -1732,7 +1744,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
if (stackFrame.args[3].jsValue().isObject())
structure = asObject(stackFrame.args[3].jsValue())->inheritorID();
else
- structure = constructor->scope().node()->globalObject()->emptyObjectStructure();
+ structure = constructor->scope().node()->globalObject->emptyObjectStructure();
return new (stackFrame.globalData) JSObject(structure);
}
@@ -1936,28 +1948,6 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
CHECK_FOR_EXCEPTION_AT_END();
}
-DEFINE_STUB_FUNCTION(void, op_put_by_val_array)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue baseValue = stackFrame.args[0].jsValue();
- int i = stackFrame.args[1].int32();
- JSValue value = stackFrame.args[2].jsValue();
-
- ASSERT(isJSArray(stackFrame.globalData, baseValue));
-
- if (LIKELY(i >= 0))
- asArray(baseValue)->JSArray::put(callFrame, i, value);
- else {
- Identifier property(callFrame, UString::from(i));
- PutPropertySlot slot;
- baseValue.put(callFrame, property, value, slot);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2042,7 +2032,7 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
stackFrame.globalData->exception = createStackOverflowError(callFrame);
VM_THROW_EXCEPTION();
}
- int32_t expectedParams = callFrame->callee()->body()->parameterCount();
+ int32_t expectedParams = callFrame->callee()->jsExecutable()->parameterCount();
int32_t inplaceArgs = min(providedParams, expectedParams);
Register* inplaceArgsDst = callFrame->registers() + argsOffset;
@@ -2182,7 +2172,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
JSValue result = slot.getValue(callFrame, ident);
- if (slot.isCacheable() && !globalObject->structure()->isDictionary() && slot.slotBase() == globalObject) {
+ if (slot.isCacheable() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
if (globalResolveInfo.structure)
globalResolveInfo.structure->deref();
@@ -2358,8 +2348,6 @@ DEFINE_STUB_FUNCTION(int, op_eq)
if (src1.isNull())
return src2.isCell() && asCell(src2)->structure()->typeInfo().masqueradesAsUndefined();
- ASSERT(src1.isCell());
-
JSCell* cell1 = asCell(src1);
if (cell1->isString()) {
@@ -2375,21 +2363,18 @@ DEFINE_STUB_FUNCTION(int, op_eq)
if (src2.isFalse())
return static_cast<JSString*>(cell1)->value().toDouble() == 0.0;
- ASSERT(src2.isCell());
JSCell* cell2 = asCell(src2);
if (cell2->isString())
return static_cast<JSString*>(cell1)->value() == static_cast<JSString*>(cell2)->value();
- ASSERT(cell2->isObject());
- src2 = static_cast<JSObject*>(cell2)->toPrimitive(stackFrame.callFrame);
+ src2 = asObject(cell2)->toPrimitive(stackFrame.callFrame);
CHECK_FOR_EXCEPTION();
goto start;
}
- ASSERT(cell1->isObject());
if (src2.isObject())
- return static_cast<JSObject*>(cell1) == asObject(src2);
- src1 = static_cast<JSObject*>(cell1)->toPrimitive(stackFrame.callFrame);
+ return asObject(cell1) == asObject(src2);
+ src1 = asObject(cell1)->toPrimitive(stackFrame.callFrame);
CHECK_FOR_EXCEPTION();
goto start;
}
@@ -2517,8 +2502,24 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
{
STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ FunctionExecutable* function = stackFrame.args[0].function();
+ JSFunction* func = function->make(callFrame, callFrame->scopeChain());
+
+ /*
+ The Identifier in a FunctionExpression can be referenced from inside
+ the FunctionExpression's FunctionBody to allow the function to call
+ itself recursively. However, unlike in a FunctionDeclaration, the
+    Identifier in a FunctionExpression cannot be referenced from, and
+    does not affect, the scope enclosing the FunctionExpression.
+ */
+ if (!function->name().isNull()) {
+ JSStaticScopeObject* functionScopeObject = new (callFrame) JSStaticScopeObject(callFrame, function->name(), func, ReadOnly | DontDelete);
+ func->scope().push(functionScopeObject);
+ }
- return stackFrame.args[0].funcExprNode()->makeFunction(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
+ return func;
}
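
The new comment's point, stated concretely: for JavaScript like var f = function g(n) { return n ? n * g(n - 1) : 1; };, the name g must resolve inside the body (so the recursion works) but must not leak into the enclosing scope. The stub achieves this by pushing a one-entry, read-only static scope binding the name to the function itself; a sketch of just that step, with a hypothetical helper name:

    static void bindFunctionExpressionName(CallFrame* callFrame, FunctionExecutable* function, JSFunction* func)
    {
        if (function->name().isNull())
            return; // anonymous function expressions need no extra scope
        // ReadOnly | DontDelete gives the binding the immutability ECMA-262 requires.
        JSStaticScopeObject* functionScopeObject = new (callFrame)
            JSStaticScopeObject(callFrame, function->name(), func, ReadOnly | DontDelete);
        func->scope().push(functionScopeObject); // visible to the function body only
    }
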
DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
@@ -2624,7 +2625,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
Register* newCallFrame = callFrame->registers() + registerOffset;
Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount;
JSValue thisValue = argv[0].jsValue();
- JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject();
+ JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject;
if (thisValue == globalObject && funcVal == globalObject->evalFunction()) {
JSValue exceptionValue;
@@ -2978,7 +2979,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_error)
unsigned bytecodeOffset = stackFrame.args[2].int32();
unsigned lineNumber = codeBlock->lineNumberForBytecodeOffset(callFrame, bytecodeOffset);
- return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerNode()->sourceID(), codeBlock->ownerNode()->sourceURL());
+ return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerExecutable()->sourceID(), codeBlock->ownerExecutable()->sourceURL());
}
DEFINE_STUB_FUNCTION(void, op_debug)
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index 8f02435..daae043 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -42,6 +42,7 @@ namespace JSC {
class CodeBlock;
class ExecutablePool;
+ class FunctionExecutable;
class Identifier;
class JSGlobalData;
@@ -53,8 +54,6 @@ namespace JSC {
class PropertySlot;
class PutPropertySlot;
class RegisterFile;
- class FuncDeclNode;
- class FuncExprNode;
class JSGlobalObject;
class RegExp;
@@ -67,8 +66,7 @@ namespace JSC {
Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
int32_t int32() { return asInt32; }
CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
- FuncDeclNode* funcDeclNode() { return static_cast<FuncDeclNode*>(asPointer); }
- FuncExprNode* funcExprNode() { return static_cast<FuncExprNode*>(asPointer); }
+ FunctionExecutable* function() { return static_cast<FunctionExecutable*>(asPointer); }
RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
@@ -131,7 +129,7 @@ namespace JSC {
#if COMPILER(MSVC)
#pragma pack(pop)
#endif // COMPILER(MSVC)
-#elif PLATFORM_ARM_ARCH(7)
+#elif PLATFORM(ARM_THUMB2)
struct JITStackFrame {
void* reserved; // Unused
JITStubArg args[6];
@@ -151,13 +149,15 @@ namespace JSC {
CallFrame* callFrame;
JSValue* exception;
+ void* padding2;
+
// These arguments are passed on the stack.
Profiler** enabledProfilerReference;
JSGlobalData* globalData;
ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
};
-#elif PLATFORM(ARM)
+#elif PLATFORM(ARM_TRADITIONAL)
struct JITStackFrame {
JITStubArg padding; // Unused
JITStubArg args[7];
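
The padding2 slot added to the Thumb-2 frame above is what moved enabledProfilerReference from 0x3c to 0x40 and grew the trampoline epilogues earlier in this patch by the same four bytes; plausibly the point is that a 0x40-byte frame keeps sp 8-byte aligned, as the ARM EABI expects at call boundaries (an inference, not stated by the patch). A self-contained sketch of pinning such a layout at compile time:

    /* Stand-in for WTF's COMPILE_ASSERT: the typedef is ill-formed
       (negative array bound) whenever the predicate is false. */
    #define LAYOUT_ASSERT(exp, name) typedef int name##_check[(exp) ? 1 : -1]

    struct ExampleFrame {
        char registerAndArgArea[0x3c]; /* what the thunks saved before this patch */
        char padding2[4];              /* the new slot; rounds the frame up to 0x40 */
    };

    LAYOUT_ASSERT(sizeof(struct ExampleFrame) == 0x40, frameSize);
    LAYOUT_ASSERT(sizeof(struct ExampleFrame) % 8 == 0, frameAlignment);
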
@@ -345,7 +345,6 @@ extern "C" {
void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);