Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h              | 10
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 46
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorPosix.cpp       |  5
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp     |  5
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorWin.cpp         |  5
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                            |  4
-rw-r--r--  JavaScriptCore/jit/JIT.h                              | 16
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp                  |  2
-rw-r--r--  JavaScriptCore/jit/JITArithmetic32_64.cpp             |  2
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp                        |  6
-rw-r--r--  JavaScriptCore/jit/JITCall32_64.cpp                   |  6
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h                 |  9
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp                     | 35
-rw-r--r--  JavaScriptCore/jit/JITOpcodes32_64.cpp                | 34
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp                       | 85
-rw-r--r--  JavaScriptCore/jit/JITStubs.h                         | 27
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.cpp                | 18
17 files changed, 156 insertions(+), 159 deletions(-)
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index 703f63f..8fd6b71 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -150,12 +150,20 @@ public:
{
if (!pageSize)
intializePageSize();
- m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+ if (isValid())
+ m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+#if !ENABLE(INTERPRETER)
+ else
+ CRASH();
+#endif
}
+ bool isValid() const;
+
PassRefPtr<ExecutablePool> poolForSize(size_t n)
{
// Try to fit in the existing small allocator
+ ASSERT(m_smallAllocationPool);
if (n < m_smallAllocationPool->available())
return m_smallAllocationPool;
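
The net effect of this hunk: ExecutableAllocator construction no longer aborts when executable memory is unavailable and an interpreter exists as a fallback; callers probe the new isValid() instead. A hedged caller-side sketch, not part of this change (chooseExecutionEngine, enableJIT and fallBackToInterpreter are hypothetical helpers):

    void chooseExecutionEngine(ExecutableAllocator& allocator)
    {
        if (allocator.isValid()) {
            // Executable memory is available: pools can be created and
            // machine code emitted, so the JIT path is usable.
            enableJIT();            // hypothetical helper
        } else {
            // Mapping executable pages failed (e.g. security policy or
            // address-space exhaustion): run on the interpreter instead.
            fallBackToInterpreter(); // hypothetical helper
        }
    }
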
diff --git a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 7846a25..65c9c13 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -306,20 +306,30 @@ public:
randomLocation <<= 21;
#endif
m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
- if (!m_base)
- CRASH();
- // For simplicity, we keep all memory in m_freeList in a 'released' state.
- // This means that we can simply reuse all memory when allocating, without
- // worrying about its previous state, and also makes coalescing m_freeList
- // simpler since we need not worry about the possibility of coalescing released
- // chunks with non-released ones.
- release(m_base, m_totalHeapSize);
- m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
+ if (m_base) {
+ // For simplicity, we keep all memory in m_freeList in a 'released' state.
+ // This means that we can simply reuse all memory when allocating, without
+ // worrying about its previous state, and also makes coalescing m_freeList
+ // simpler since we need not worry about the possibility of coalescing released
+ // chunks with non-released ones.
+ release(m_base, m_totalHeapSize);
+ m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
+ }
+#if !ENABLE(INTERPRETER)
+ else
+ CRASH();
+#endif
}
void* alloc(size_t size)
{
+#if ENABLE(INTERPRETER)
+ if (!m_base)
+ return 0;
+#else
+ ASSERT(m_base);
+#endif
void* result;
// Freed allocations of the common size are not stored back into the main
@@ -382,6 +392,7 @@ public:
void free(void* pointer, size_t size)
{
+ ASSERT(m_base);
// Call release to report to the operating system that this
// memory is no longer in use, and need not be paged out.
ASSERT(isWithinVMPool(pointer, size));
@@ -404,6 +415,8 @@ public:
}
}
+ bool isValid() const { return !!m_base; }
+
private:
#ifndef NDEBUG
@@ -435,19 +448,26 @@ void ExecutableAllocator::intializePageSize()
static FixedVMPoolAllocator* allocator = 0;
static SpinLock spinlock = SPINLOCK_INITIALIZER;
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+bool ExecutableAllocator::isValid() const
{
- SpinLockHolder lock_holder(&spinlock);
-
+ SpinLockHolder lock_holder(&spinlock);
if (!allocator)
allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE);
+ return allocator->isValid();
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+{
+ SpinLockHolder lock_holder(&spinlock);
+
+ ASSERT(allocator);
ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
return alloc;
}
void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
{
- SpinLockHolder lock_holder(&spinlock);
+ SpinLockHolder lock_holder(&spinlock);
ASSERT(allocator);
allocator->free(allocation.pages, allocation.size);
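
One level down, the FixedVMPoolAllocator applies the same idea: an mmap failure is recorded rather than fatal, and isValid()/alloc() report it upward. A compressed standalone sketch of the new lifecycle (PoolSketch is illustrative; protection flags and the coalescing free list are simplified away):

    #include <sys/mman.h>
    #include <cstddef>

    class PoolSketch {
    public:
        explicit PoolSketch(size_t totalSize)
        {
            void* base = mmap(0, totalSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                              MAP_PRIVATE | MAP_ANON, -1, 0);
            m_base = (base == MAP_FAILED) ? 0 : base; // record failure, don't crash
        }

        bool isValid() const { return !!m_base; }

        void* alloc(size_t)
        {
            if (!m_base)
                return 0;   // interpreter builds treat a null result as "no JIT"
            return m_base;  // the real pool carves ranges from a free list
        }

    private:
        void* m_base;
    };
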
diff --git a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
index b04049c..a841d32 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -55,6 +55,11 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
ASSERT_UNUSED(result, !result);
}
+bool ExecutableAllocator::isValid() const
+{
+ return true;
+}
+
}
#endif
diff --git a/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp b/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
index 9028f50..8b0553d 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
@@ -66,6 +66,11 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
delete alloc.chunk;
}
+bool ExecutableAllocator::isValid() const
+{
+ return true;
+}
+
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
#endif
diff --git a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
index 72a1d5f..2b13529 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
@@ -54,6 +54,11 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
VirtualFree(alloc.pages, 0, MEM_RELEASE);
}
+bool ExecutableAllocator::isValid() const
+{
+ return true;
+}
+
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
#endif
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index 5d96847..f5df5f7 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -637,7 +637,7 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* ca
}
// patch the call so we do not continue to try to link.
- repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall());
}
void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
@@ -657,7 +657,7 @@ void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBloc
}
// patch the call so we do not continue to try to link.
- repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualConstruct());
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
}
#endif // ENABLE(JIT_OPTIMIZE_CALL)
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index 0980be2..f9be930 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -219,12 +219,16 @@ namespace JSC {
static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, TrampolineStructure *trampolines)
{
+ if (!globalData->canUseJIT())
+ return;
JIT jit(globalData);
jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
}
static CodePtr compileCTINativeCall(JSGlobalData* globalData, PassRefPtr<ExecutablePool> executablePool, NativeFunction func)
{
+ if (!globalData->canUseJIT())
+ return CodePtr();
JIT jit(globalData);
return jit.privateCompileCTINativeCall(executablePool, globalData, func);
}
@@ -352,12 +356,8 @@ namespace JSC {
static const int patchOffsetGetByIdPropertyMapOffset1 = 22;
static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
static const int patchOffsetGetByIdPutResult = 28;
-#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 35;
-#elif ENABLE(OPCODE_SAMPLING)
+#if ENABLE(OPCODE_SAMPLING)
static const int patchOffsetGetByIdSlowCaseCall = 37;
-#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 25;
#else
static const int patchOffsetGetByIdSlowCaseCall = 27;
#endif
@@ -549,12 +549,8 @@ namespace JSC {
static const int patchLengthGetByIdExternalLoad = 3;
static const int patchOffsetGetByIdPropertyMapOffset = 22;
static const int patchOffsetGetByIdPutResult = 22;
-#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 31;
-#elif ENABLE(OPCODE_SAMPLING)
+#if ENABLE(OPCODE_SAMPLING)
static const int patchOffsetGetByIdSlowCaseCall = 33;
-#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 21;
#else
static const int patchOffsetGetByIdSlowCaseCall = 23;
#endif
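
The two guards added to this header establish the convention the rest of the patch relies on: compile entry points silently return empty results when canUseJIT() is false. A hedged consumer-side sketch, not from this diff (setUpNativeCall is hypothetical):

    void setUpNativeCall(JSGlobalData* globalData, PassRefPtr<ExecutablePool> pool, NativeFunction func)
    {
        JIT::CodePtr code = JIT::compileCTINativeCall(globalData, pool, func);
        if (!code.executableAddress()) {
            // JIT unavailable (allocator invalid or JIT disabled): keep the
            // interpreter entry point; there is nothing to install.
            return;
        }
        // ... otherwise install 'code' as the host function's entry point ...
    }
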
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index 0e5bb45..0f6d290 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -1203,7 +1203,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
addSlowCase(branch32(Equal, regT2, Imm32(1)));
- emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
+ emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
emitPutVirtualRegister(result, regT0);
#else
diff --git a/JavaScriptCore/jit/JITArithmetic32_64.cpp b/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 4f36d66..232e287 100644
--- a/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -1370,7 +1370,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
addSlowCase(branch32(Equal, regT2, Imm32(0)));
- emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
+ emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
#else
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
index 5c2b308..368eab9 100644
--- a/JavaScriptCore/jit/JITCall.cpp
+++ b/JavaScriptCore/jit/JITCall.cpp
@@ -81,7 +81,7 @@ void JIT::compileOpCallVarargs(Instruction* instruction)
addPtr(callFrameRegister, regT3);
storePtr(callFrameRegister, regT3);
addPtr(regT2, callFrameRegister);
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+ emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
sampleCodeBlock(m_codeBlock);
}
@@ -132,7 +132,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstruct() : m_globalData->jitStubs.ctiVirtualCall());
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
if (opcodeID == op_call_eval)
wasEval.link(this);
@@ -231,7 +231,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstructLink() : m_globalData->jitStubs.ctiVirtualCallLink());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
// Done! - return to the hot path.
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
diff --git a/JavaScriptCore/jit/JITCall32_64.cpp b/JavaScriptCore/jit/JITCall32_64.cpp
index 5f551cc..aa8e987 100644
--- a/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/JavaScriptCore/jit/JITCall32_64.cpp
@@ -82,7 +82,7 @@ void JIT::compileOpCallVarargs(Instruction* instruction)
move(regT2, regT1); // argCount
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+ emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
sampleCodeBlock(m_codeBlock);
}
@@ -232,7 +232,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstruct() : m_globalData->jitStubs.ctiVirtualCall());
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
if (opcodeID == op_call_eval)
wasEval.link(this);
@@ -333,7 +333,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstructLink() : m_globalData->jitStubs.ctiVirtualCallLink());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
// Done! - return to the hot path.
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 04f7158..3b28f34 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -181,18 +181,12 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
#endif
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-ALWAYS_INLINE void JIT::restoreArgumentReference()
-{
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
-}
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
-#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
move(stackPointerRegister, firstArgumentRegister);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
}
+
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
@@ -203,7 +197,6 @@ ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
#endif
// In the trampoline on x86-64, the first argument register is not overwritten.
}
-#endif
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 8e86d40..949dee3 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -73,14 +73,14 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// VirtualCallLink Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- JumpList callLazyLinkFailures;
+ JumpList callLinkFailures;
Label virtualCallLinkBegin = align();
compileOpCallInitializeCallFrame();
preserveReturnAddressAfterCall(regT3);
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
restoreArgumentReference();
Call callLazyLinkCall = call();
- callLazyLinkFailures.append(branchTestPtr(Zero, regT0));
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
restoreReturnAddressBeforeReturn(regT3);
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
jump(regT0);
@@ -93,24 +93,11 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
restoreArgumentReference();
Call callLazyLinkConstruct = call();
- callLazyLinkFailures.append(branchTestPtr(Zero, regT0));
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
restoreReturnAddressBeforeReturn(regT3);
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
jump(regT0);
- // If the parser fails we want to be able to keep going,
- // so we handle this as a parse failure.
- callLazyLinkFailures.link(this);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
- ret();
-
-
// VirtualCall Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallBegin = align();
@@ -122,6 +109,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callCompileCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
restoreReturnAddressBeforeReturn(regT3);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
@@ -141,6 +129,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callCompileConstruct = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
restoreReturnAddressBeforeReturn(regT3);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
@@ -148,6 +137,18 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
jump(regT0);
+
+ // If the parser fails we want to be able to keep going,
+ // so we handle this as a parse failure.
+ callLinkFailures.link(this);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
+ ret();
// NativeCall Trampoline
Label nativeCallThunk = privateCompileCTINativeCall(globalData);
@@ -307,7 +308,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool>, JSGlobalData* globalData, NativeFunction)
{
- return globalData->jitStubs.ctiNativeCall();
+ return globalData->jitStubs->ctiNativeCall();
}
void JIT::emit_op_mov(Instruction* currentInstruction)
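
Condensed, the trampoline reshuffle above amounts to one JumpList shared by all four stub calls, with the unwinding tail emitted once after the last trampoline (assembler calls as in the real method; trampoline bodies elided):

    JumpList callLinkFailures;

    // After each of the four stub calls (lazy-link and compile, for both
    // call and construct), a zero result means compilation failed:
    callLinkFailures.append(branchTestPtr(Zero, regT0));

    // Emitted once, below the trampolines: restore the caller's frame,
    // record the faulting return address as the exception location, and
    // leave through the common VM-throw trampoline.
    callLinkFailures.link(this);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
    restoreReturnAddressBeforeReturn(regT1);
    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
    ret();
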
diff --git a/JavaScriptCore/jit/JITOpcodes32_64.cpp b/JavaScriptCore/jit/JITOpcodes32_64.cpp
index a44a8a1..658ebc5 100644
--- a/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -64,10 +64,10 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
ret();
#endif
-
+
+ JumpList callLinkFailures;
// (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
#if ENABLE(JIT_OPTIMIZE_CALL)
- JumpList callLazyLinkFailures;
// VirtualCallLink Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallLinkBegin = align();
@@ -76,7 +76,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
restoreArgumentReference();
Call callLazyLinkCall = call();
- callLazyLinkFailures.append(branchTestPtr(Zero, regT0));
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
restoreReturnAddressBeforeReturn(regT3);
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
jump(regT0);
@@ -90,22 +90,10 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
restoreArgumentReference();
Call callLazyLinkConstruct = call();
restoreReturnAddressBeforeReturn(regT3);
- callLazyLinkFailures.append(branchTestPtr(Zero, regT0));
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
jump(regT0);
- // If the parser fails we want to be able to keep going,
- // so we handle this as a parse failure.
- callLazyLinkFailures.link(this);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
- ret();
-
#endif // ENABLE(JIT_OPTIMIZE_CALL)
// VirtualCall Trampoline
@@ -119,6 +107,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callCompileCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
restoreReturnAddressBeforeReturn(regT3);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
@@ -138,6 +127,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callCompileCconstruct = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
restoreReturnAddressBeforeReturn(regT3);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
@@ -145,6 +135,18 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
jump(regT0);
+
+ // If the parser fails we want to be able to keep going,
+ // so we handle this as a parse failure.
+ callLinkFailures.link(this);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
+ ret();
// NativeCall Trampoline
Label nativeCallThunk = privateCompileCTINativeCall(globalData);
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index 85bd54f..f088b6e 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -139,9 +139,7 @@ asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
-#endif
"call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addl $0x3c, %esp" "\n"
"popl %ebx" "\n"
@@ -165,10 +163,6 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(X86_64)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
-#endif
-
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 32 == 0x0, JITStackFrame_maintains_32byte_stack_alignment);
@@ -235,10 +229,6 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(ARM_THUMB2)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
-#endif
-
#define THUNK_RETURN_ADDRESS_OFFSET 0x3C
#define PRESERVED_RETURN_ADDRESS_OFFSET 0x40
#define PRESERVED_R4_OFFSET 0x44
@@ -256,10 +246,6 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(MSVC) && CPU(X86)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
-#endif
-
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
@@ -358,9 +344,7 @@ asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
-#endif
"call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addl $0x1c, %esp" "\n"
"popl %ebx" "\n"
@@ -384,10 +368,6 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(X86_64)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
-#endif
-
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
@@ -461,10 +441,6 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(ARM_THUMB2)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
-#endif
-
#define THUNK_RETURN_ADDRESS_OFFSET 0x1C
#define PRESERVED_RETURN_ADDRESS_OFFSET 0x20
#define PRESERVED_R4_OFFSET 0x24
@@ -482,10 +458,6 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif CPU(MIPS)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on MIPS."
-#endif
-
asm volatile(
".text" "\n"
".align 2" "\n"
@@ -620,10 +592,6 @@ __asm void ctiOpThrowNotCaught()
#elif COMPILER(MSVC) && CPU(X86)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
-#endif
-
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
@@ -797,8 +765,11 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
JITThunks::JITThunks(JSGlobalData* globalData)
{
- JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_trampolineStructure);
+ if (!globalData->executableAllocator.isValid())
+ return;
+ JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_trampolineStructure);
+ ASSERT(m_executablePool);
#if CPU(ARM_THUMB2)
// Unfortunately, the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
// and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
@@ -915,7 +886,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
// The tradeoff of compiling a patched inline string length access routine does not seem
// to pay off, so we currently only do this for arrays.
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs.ctiStringLengthTrampoline());
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs->ctiStringLengthTrampoline());
return;
}
@@ -985,12 +956,6 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#define SETUP_VA_LISTL_ARGS va_list vl_args; va_start(vl_args, args)
-#else
-#define SETUP_VA_LISTL_ARGS
-#endif
-
#ifndef NDEBUG
extern "C" {
@@ -1021,13 +986,13 @@ struct StackHack {
ReturnAddressPtr savedReturnAddress;
};
-#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
+#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress)
#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
#else
-#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS)
+#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS)
#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress)
#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
@@ -1858,8 +1823,11 @@ DEFINE_STUB_FUNCTION(void*, op_call_jitCompile)
ASSERT(!function->isHostFunction());
FunctionExecutable* executable = function->jsExecutable();
ScopeChainNode* callDataScopeChain = function->scope().node();
- executable->jitCodeForCall(stackFrame.callFrame, callDataScopeChain);
-
+ JSObject* error = executable->compileForCall(stackFrame.callFrame, callDataScopeChain);
+ if (error) {
+ stackFrame.callFrame->globalData().exception = error;
+ return 0;
+ }
return function;
}
@@ -1876,8 +1844,11 @@ DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile)
ASSERT(!function->isHostFunction());
FunctionExecutable* executable = function->jsExecutable();
ScopeChainNode* callDataScopeChain = function->scope().node();
- executable->jitCodeForConstruct(stackFrame.callFrame, callDataScopeChain);
-
+ JSObject* error = executable->compileForConstruct(stackFrame.callFrame, callDataScopeChain);
+ if (error) {
+ stackFrame.callFrame->globalData().exception = error;
+ return 0;
+ }
return function;
}
@@ -2013,12 +1984,12 @@ DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
codePtr = executable->generatedJITCodeForCall().addressForCall();
else {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- codeBlock = functionExecutable->bytecodeForCall(stackFrame.callFrame, callee->scope().node());
- if (!codeBlock) {
- stackFrame.callFrame->globalData().exception = createStackOverflowError(callFrame);
+ JSObject* error = functionExecutable->compileForCall(callFrame, callee->scope().node());
+ if (error) {
+ callFrame->globalData().exception = createStackOverflowError(callFrame);
return 0;
}
- functionExecutable->jitCodeForCall(callFrame, callee->scope().node());
+ codeBlock = &functionExecutable->generatedBytecodeForCall();
if (callFrame->argumentCountIncludingThis() == static_cast<size_t>(codeBlock->m_numParameters))
codePtr = functionExecutable->generatedJITCodeForCall().addressForCall();
else
@@ -2047,12 +2018,12 @@ DEFINE_STUB_FUNCTION(void*, vm_lazyLinkConstruct)
codePtr = executable->generatedJITCodeForConstruct().addressForCall();
else {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- codeBlock = functionExecutable->bytecodeForConstruct(stackFrame.callFrame, callee->scope().node());
- if (!codeBlock) {
+ JSObject* error = functionExecutable->compileForConstruct(callFrame, callee->scope().node());
+ if (error) {
throwStackOverflowError(callFrame, stackFrame.globalData, ReturnAddressPtr(callFrame->returnPC()), STUB_RETURN_ADDRESS);
- VM_THROW_EXCEPTION();
+ return 0;
}
- functionExecutable->jitCodeForConstruct(callFrame, callee->scope().node());
+ codeBlock = &functionExecutable->generatedBytecodeForConstruct();
if (callFrame->argumentCountIncludingThis() == static_cast<size_t>(codeBlock->m_numParameters))
codePtr = functionExecutable->generatedJITCodeForConstruct().addressForCall();
else
@@ -3536,8 +3507,10 @@ PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalDat
PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator)
{
std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
- if (entry.second)
- entry.first->second = NativeExecutable::create(generator(globalData, m_executablePool.get()), function, ctiNativeConstruct(), callHostFunctionAsConstructor);
+ if (entry.second) {
+ MacroAssemblerCodePtr code = globalData->canUseJIT() ? generator(globalData, m_executablePool.get()) : MacroAssemblerCodePtr();
+ entry.first->second = NativeExecutable::create(code, function, ctiNativeConstruct(), callHostFunctionAsConstructor);
+ }
return entry.first->second;
}
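
The stub side of the same contract, condensed from op_call_jitCompile above (argument fetch simplified): a non-null JSObject* from compileForCall is the error; storing it as the pending exception and returning 0 routes the trampoline to the shared failure tail.

    DEFINE_STUB_FUNCTION(void*, op_call_jitCompile)
    {
        STUB_INIT_STACK_FRAME(stackFrame);

        JSFunction* function = asFunction(stackFrame.args[0].jsValue()); // simplified
        FunctionExecutable* executable = function->jsExecutable();
        ScopeChainNode* scopeChain = function->scope().node();
        if (JSObject* error = executable->compileForCall(stackFrame.callFrame, scopeChain)) {
            stackFrame.callFrame->globalData().exception = error;
            return 0; // branchTestPtr(Zero, regT0) in the trampoline takes the failure path
        }
        return function;
    }
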
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index a5b21e5..ba9e15f 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -225,26 +225,15 @@ namespace JSC {
#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
- #define STUB_ARGS_DECLARATION void* args, ...
- #define STUB_ARGS (reinterpret_cast<void**>(vl_args) - 1)
-
- #if COMPILER(MSVC)
- #define JIT_STUB __cdecl
- #else
- #define JIT_STUB
- #endif
+#define STUB_ARGS_DECLARATION void** args
+#define STUB_ARGS (args)
+
+#if CPU(X86) && COMPILER(MSVC)
+#define JIT_STUB __fastcall
+#elif CPU(X86) && COMPILER(GCC) && !OS(WINDOWS)
+#define JIT_STUB __attribute__ ((fastcall))
#else
- #define STUB_ARGS_DECLARATION void** args
- #define STUB_ARGS (args)
-
- #if CPU(X86) && COMPILER(MSVC)
- #define JIT_STUB __fastcall
- #elif CPU(X86) && COMPILER(GCC)
- #define JIT_STUB __attribute__ ((fastcall))
- #else
- #define JIT_STUB
- #endif
+#define JIT_STUB
#endif
extern "C" void ctiVMThrowTrampoline();
diff --git a/JavaScriptCore/jit/ThunkGenerators.cpp b/JavaScriptCore/jit/ThunkGenerators.cpp
index 271c7c1..20857cb 100644
--- a/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -68,7 +68,7 @@ MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData* globalData, Executa
SpecializedThunkJIT jit(1, globalData, pool);
stringCharLoad(jit);
jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize(globalData->jitStubs.ctiNativeCall());
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
}
MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
@@ -77,7 +77,7 @@ MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData* globalData, ExecutableP
stringCharLoad(jit);
charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(globalData->jitStubs.ctiNativeCall());
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
}
MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
@@ -87,7 +87,7 @@ MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData* globalData, Execu
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(globalData->jitStubs.ctiNativeCall());
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
}
MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
@@ -95,15 +95,15 @@ MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePoo
#if USE(JSVALUE64) || USE(JSVALUE32_64)
SpecializedThunkJIT jit(1, globalData, pool);
if (!jit.supportsFloatingPointSqrt())
- return globalData->jitStubs.ctiNativeCall();
+ return globalData->jitStubs->ctiNativeCall();
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(globalData->jitStubs.ctiNativeCall());
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
#else
UNUSED_PARAM(pool);
- return globalData->jitStubs.ctiNativeCall();
+ return globalData->jitStubs->ctiNativeCall();
#endif
}
@@ -115,7 +115,7 @@ MacroAssemblerCodePtr powThunkGenerator(JSGlobalData* globalData, ExecutablePool
#if USE(JSVALUE64) || USE(JSVALUE32_64)
SpecializedThunkJIT jit(2, globalData, pool);
if (!jit.supportsFloatingPoint())
- return globalData->jitStubs.ctiNativeCall();
+ return globalData->jitStubs->ctiNativeCall();
jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
@@ -148,10 +148,10 @@ MacroAssemblerCodePtr powThunkGenerator(JSGlobalData* globalData, ExecutablePool
} else
jit.appendFailure(nonIntExponent);
- return jit.finalize(globalData->jitStubs.ctiNativeCall());
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
#else
UNUSED_PARAM(pool);
- return globalData->jitStubs.ctiNativeCall();
+ return globalData->jitStubs->ctiNativeCall();
#endif
}