author     Kristian Monsen <kristianm@google.com>    2010-05-21 16:53:46 +0100
committer  Kristian Monsen <kristianm@google.com>    2010-05-25 10:24:15 +0100
commit     6c2af9490927c3c5959b5cb07461b646f8b32f6c (patch)
tree       f7111b9b22befab472616c1d50ec94eb50f1ec8c /JavaScriptCore/jit
parent     a149172322a9067c14e8b474a53e63649aa17cad (diff)
Merge WebKit at r59636: Initial merge by git
Change-Id: I59b289c4e6b18425f06ce41cc9d34c522515de91
Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                     |   44
-rw-r--r--  JavaScriptCore/jit/JIT.h                       |   10
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp           |    4
-rw-r--r--  JavaScriptCore/jit/JITArithmetic32_64.cpp      |    4
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp                 |    8
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h          |    9
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp              | 1653
-rw-r--r--  JavaScriptCore/jit/JITOpcodes32_64.cpp         | 1603
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp       |   59
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess32_64.cpp  |   43
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp                |  143
-rw-r--r--  JavaScriptCore/jit/JITStubs.h                  |   11
-rw-r--r--  JavaScriptCore/jit/JSInterfaceJIT.h            |   15
-rw-r--r--  JavaScriptCore/jit/SpecializedThunkJIT.h       |    2
14 files changed, 2032 insertions, 1576 deletions
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index f7b06a0..c7a4056 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -275,6 +275,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_new_func)
DEFINE_OP(op_new_func_exp)
DEFINE_OP(op_new_object)
+ DEFINE_OP(op_new_regexp)
DEFINE_OP(op_next_pname)
DEFINE_OP(op_not)
DEFINE_OP(op_nstricteq)
@@ -297,6 +298,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_resolve)
DEFINE_OP(op_resolve_base)
DEFINE_OP(op_resolve_global)
+ DEFINE_OP(op_resolve_global_dynamic)
DEFINE_OP(op_resolve_skip)
DEFINE_OP(op_resolve_with_base)
DEFINE_OP(op_ret)
@@ -363,9 +365,7 @@ void JIT::privateCompileSlowCases()
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
m_propertyAccessInstructionIndex = 0;
-#if USE(JSVALUE32_64)
m_globalResolveInfoIndex = 0;
-#endif
m_callLinkInfoIndex = 0;
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
@@ -425,9 +425,8 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_pre_inc)
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
-#if USE(JSVALUE32_64)
DEFINE_SLOWCASE_OP(op_resolve_global)
-#endif
+ DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
DEFINE_SLOWCASE_OP(op_rshift)
DEFINE_SLOWCASE_OP(op_urshift)
DEFINE_SLOWCASE_OP(op_stricteq)
@@ -466,31 +465,30 @@ JITCode JIT::privateCompile()
preserveReturnAddressAfterCall(regT2);
emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
- Jump slowRegisterFileCheck;
- Label afterRegisterFileCheck;
+ Jump registerFileCheck;
if (m_codeBlock->codeType() == FunctionCode) {
// In the case of a fast linked call, we do not set this up in the caller.
emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
- peek(regT0, OBJECT_OFFSETOF(JITStackFrame, registerFile) / sizeof (void*));
addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
-
- slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, OBJECT_OFFSETOF(RegisterFile, m_end)));
- afterRegisterFileCheck = label();
+ registerFileCheck = branchPtr(Below, AbsoluteAddress(&m_globalData->interpreter->registerFile().
+ m_end), regT1);
}
+ Label functionBody = label();
+
privateCompileMainPass();
privateCompileLinkPass();
privateCompileSlowCases();
if (m_codeBlock->codeType() == FunctionCode) {
- slowRegisterFileCheck.link(this);
+ registerFileCheck.link(this);
m_bytecodeIndex = 0;
JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
- jump(afterRegisterFileCheck);
+ jump(functionBody);
}
ASSERT(m_jmpTable.isEmpty());
@@ -589,7 +587,7 @@ void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObjec
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
-void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
+void JIT::unlinkCallOrConstruct(CallLinkInfo* callLinkInfo)
{
// When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
// (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
@@ -621,6 +619,26 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* ca
// patch the call so we do not continue to try to link.
repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
}
+
+void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
+{
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ // Currently we only link calls with the exact number of arguments.
+ // If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
+ if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
+ ASSERT(!callLinkInfo->isLinked());
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->addCaller(callLinkInfo);
+
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code.addressForCall());
+ }
+
+ // patch the call so we do not continue to try to link.
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualConstruct());
+}
#endif // ENABLE(JIT_OPTIMIZE_CALL)
} // namespace JSC
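
A note on the JIT.cpp hunks above: the function-entry stack check is restructured, and construct call sites get their own link path (linkConstruct repatches failed links to ctiVirtualConstruct rather than ctiVirtualCall). A pseudocode paraphrase of the new entry check, as I read the diff (illustrative, not code from the tree):

    // New fast-path entry sequence for FunctionCode blocks:
    //   regT1 = callFrameRegister + m_numCalleeRegisters * sizeof(Register);  // new frame top
    //   if (registerFile.m_end < regT1)     // branchPtr(Below, AbsoluteAddress(&...m_end), regT1)
    //       goto registerFileCheck;         // slow path: cti_register_file_check, then...
    //   functionBody:                       // ...jump back here and fall through
    //
    // The old sequence first peeked the RegisterFile* out of the JITStackFrame and
    // compared against an Address off it; reading m_end through an AbsoluteAddress
    // drops that extra load and frees regT0.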
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index a7e8890..b857351 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -234,7 +234,8 @@ namespace JSC {
}
static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount, JSGlobalData*);
- static void unlinkCall(CallLinkInfo*);
+ static void linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount, JSGlobalData*);
+ static void unlinkCallOrConstruct(CallLinkInfo*);
private:
struct JSRInfo {
@@ -447,7 +448,6 @@ namespace JSC {
Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
#endif
void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
- void emitFastArithImmToInt(RegisterID);
void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
void emitTagAsBoolImmediate(RegisterID reg);
@@ -695,6 +695,7 @@ namespace JSC {
void emit_op_new_func(Instruction*);
void emit_op_new_func_exp(Instruction*);
void emit_op_new_object(Instruction*);
+ void emit_op_new_regexp(Instruction*);
void emit_op_get_pnames(Instruction*);
void emit_op_next_pname(Instruction*);
void emit_op_not(Instruction*);
@@ -717,7 +718,8 @@ namespace JSC {
void emit_op_put_setter(Instruction*);
void emit_op_resolve(Instruction*);
void emit_op_resolve_base(Instruction*);
- void emit_op_resolve_global(Instruction*);
+ void emit_op_resolve_global(Instruction*, bool dynamic = false);
+ void emit_op_resolve_global_dynamic(Instruction*);
void emit_op_resolve_skip(Instruction*);
void emit_op_resolve_with_base(Instruction*);
void emit_op_ret(Instruction*);
@@ -782,6 +784,7 @@ namespace JSC {
void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -902,6 +905,7 @@ namespace JSC {
int m_uninterruptedConstantSequenceBegin;
#endif
#endif
+ static PassRefPtr<NativeExecutable> stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
} JIT_CLASS_ALIGNMENT;
inline void JIT::emit_op_loop(Instruction* currentInstruction)
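
JIT.h now declares emit_op_resolve_global with a defaulted dynamic flag plus a new emit_op_resolve_global_dynamic / emitSlow_op_resolve_global_dynamic pair. The emitter bodies are not in this extract; a plausible shape for the hot-path shim, assuming the flag does the differentiating work:

    void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
    {
        // Assumed forwarding shim: reuse the resolve_global emitter, with the
        // dynamic flag selecting the extra scope-chain skip checks.
        emit_op_resolve_global(currentInstruction, /* dynamic */ true);
    }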
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index e5a4620..cd39b3a 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -370,7 +370,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jlesseq);
+ JITStubCall stubCall(this, cti_op_jless);
stubCall.addArgument(op1, regT0);
stubCall.addArgument(op2, regT1);
stubCall.call();
@@ -585,7 +585,7 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jlesseq);
+ JITStubCall stubCall(this, cti_op_jless);
stubCall.addArgument(op1, regT0);
stubCall.addArgument(op2, regT1);
stubCall.call();
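
Both JITArithmetic.cpp hunks are the same one-line fix: the slow paths of op_jnless and op_jless tail-called the stub for the wrong comparison, cti_op_jlesseq. Since the slow path handles exactly the operand kinds the fast path bails on, equal operands there took the wrong branch. A hypothetical illustration of the corrected op_jless slow case (the final branch line is my assumption from the surrounding code):

    // op_jless must jump iff op1 < op2. Strings always reach the slow case, so
    // before this fix ("a" < "a") went through cti_op_jlesseq and came back true.
    JITStubCall stubCall(this, cti_op_jless);   // stub now matches the opcode
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);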
diff --git a/JavaScriptCore/jit/JITArithmetic32_64.cpp b/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 962c066..97c8d32 100644
--- a/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -292,7 +292,7 @@ void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
if (isOperandConstantImmediateInt(op1)) {
emitLoad(op2, regT3, regT2);
notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(invert ? LessThan : GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
} else if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
@@ -1135,7 +1135,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
break;
case op_jlesseq:
emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), dst);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
break;
default:
ASSERT_NOT_REACHED();
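
Two related fixes for op_jlesseq on the JSVALUE32_64 representation; the algebra, spelled out (my annotation, not from the patch):

    // op_jlesseq jumps when op1 <= op2 (invert flips the sense for jnlesseq).
    //
    // Constant-op1 integer case: op2 sits in regT2, so "op1 <= op2" reads as
    // "regT2 >= constant", i.e. GreaterThanOrEqual. The old GreaterThan lost
    // the equality case: (x <= 5) with x == 5 failed to take the jump.
    //
    // Double case: op1 is already in fpRegT0 when this case runs and op2 is
    // loaded into fpRegT1, so DoubleLessThanOrEqual must compare
    // (fpRegT0, fpRegT1); the old operand order effectively tested op2 <= op1.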
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
index da603bd..9979c8e 100644
--- a/JavaScriptCore/jit/JITCall.cpp
+++ b/JavaScriptCore/jit/JITCall.cpp
@@ -267,7 +267,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstruct() : m_globalData->jitStubs.ctiVirtualCall());
if (opcodeID == op_call_eval)
wasEval.link(this);
@@ -400,7 +400,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstructLink() : m_globalData->jitStubs.ctiVirtualCallLink());
// Put the return value in dst.
emitStore(dst, regT1, regT0);

@@ -563,7 +563,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstruct() : m_globalData->jitStubs.ctiVirtualCall());
if (opcodeID == op_call_eval)
wasEval.link(this);
@@ -691,7 +691,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
move(regT0, regT2);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstructLink() : m_globalData->jitStubs.ctiVirtualCallLink());
// Put the return value in dst.
emitPutVirtualRegister(dst);
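
All four JITCall.cpp hunks apply the same pattern: op_construct sites now target the construct flavor of each virtual-call trampoline. A hypothetical helper (not in the patch) condensing what each site now emits inline:

    static MacroAssemblerCodePtr virtualTrampolineFor(OpcodeID opcodeID, JSGlobalData* globalData, bool lazyLink)
    {
        // Constructors get their own stubs so linking can check the callee's
        // m_numParametersForConstruct rather than its call arity.
        if (opcodeID == op_construct)
            return lazyLink ? globalData->jitStubs.ctiVirtualConstructLink()
                            : globalData->jitStubs.ctiVirtualConstruct();
        return lazyLink ? globalData->jitStubs.ctiVirtualCallLink()
                        : globalData->jitStubs.ctiVirtualCall();
    }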
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 892ab36..148f615 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -820,15 +820,6 @@ ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID d
#endif
}
-ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
-{
-#if USE(JSVALUE64)
- UNUSED_PARAM(reg);
-#else
- rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);
-#endif
-}
-
// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
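
The deleted emitFastArithImmToInt was a no-op under JSVALUE64; its real body existed only for the old 32-bit immediate encoding, and it is presumably removed here because this merge leaves it with no callers. For reference, the deleted non-JSVALUE64 body was a single shift:

    // Recover the raw int32 from a tagged integer immediate by shifting the
    // payload down past the tag bits (from the deleted helper):
    rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);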
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 2ad79c6..9b0f780 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -39,7 +39,10 @@
namespace JSC {
-#if USE(JSVALUE32_64)
+#if !USE(JSVALUE32_64)
+
+#define RECORD_JUMP_TARGET(targetOffset) \
+ do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
{
@@ -48,1587 +51,170 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
softModulo();
#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (1) This function provides fast property access for string length
+ // (2) The second function provides fast property access for string length
Label stringLengthBegin = align();
-
- // regT0 holds payload, regT1 holds tag
-
- Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // Check eax is a string
+ Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the Ustring.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
- Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
- move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
+ Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
+ // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+
ret();
#endif
- // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-#if ENABLE(JIT_OPTIMIZE_CALL)
// VirtualCallLink Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallLinkBegin = align();
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump isNativeFunc1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
- Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump hasCodeBlock1 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
- Call callJSFunction2 = call();
+ Call callJSFunction1 = call();
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock2.link(this);
+ hasCodeBlock1.link(this);
// Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ Jump arityCheckOkay1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), regT1);
preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
- Call callArityCheck2 = call();
+ Call callArityCheck1 = call();
move(regT1, callFrameRegister);
emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay2.link(this);
+ arityCheckOkay1.link(this);
- isNativeFunc2.link(this);
+ isNativeFunc1.link(this);
compileOpCallInitializeCallFrame();
-
preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
- Call callLazyLinkCall = call();
+ Call callLazyLinkCall1 = call();
restoreReturnAddressBeforeReturn(regT3);
jump(regT0);
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
- // VirtualCall Trampoline
+ // VirtualConstructLink Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallBegin = align();
+ Label virtualConstructLinkBegin = align();
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
- Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
- Call callJSFunction1 = call();
+ Call callJSFunction2 = call();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock3.link(this);
-
+ hasCodeBlock2.link(this);
+
// Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), regT1);
preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
- Call callArityCheck1 = call();
+ Call callArityCheck2 = call();
move(regT1, callFrameRegister);
emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- arityCheckOkay3.link(this);
+ arityCheckOkay2.link(this);
- isNativeFunc3.link(this);
+ isNativeFunc2.link(this);
compileOpCallInitializeCallFrame();
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
- jump(regT0);
-
-#if CPU(X86) || CPU(ARM_TRADITIONAL)
- Label nativeCallThunk = align();
- preserveReturnAddressAfterCall(regT0);
- emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
-
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-#if CPU(X86)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
- /* We have two structs that we use to describe the stackframe we set up for our
- * call to native code. NativeCallFrameStructure describes the how we set up the stack
- * in advance of the call. NativeFunctionCalleeSignature describes the callframe
- * as the native code expects it. We do this as we are using the fastcall calling
- * convention which results in the callee popping its arguments off the stack, but
- * not the rest of the callframe so we need a nice way to ensure we increment the
- * stack pointer by the right amount after the call.
- */
-
-#if COMPILER(MSVC) || OS(LINUX)
-#if COMPILER(MSVC)
-#pragma pack(push)
-#pragma pack(4)
-#endif // COMPILER(MSVC)
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in EDX
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- JSValue result;
- };
- struct NativeFunctionCalleeSignature {
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- };
-#if COMPILER(MSVC)
-#pragma pack(pop)
-#endif // COMPILER(MSVC)
-#else
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in ECX
- // JSObject* callee; // passed in EDX
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- };
- struct NativeFunctionCalleeSignature {
- JSValue thisValue;
- ArgList* argPointer;
- };
-#endif
-
- const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
- // Allocate system stack frame
- subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
-
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
-
- // push argcount
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in regT1
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
-
- // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
- loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
- storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-
-#if COMPILER(MSVC) || OS(LINUX)
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
-
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
- storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
-
- // Plant callframe
- move(callFrameRegister, X86Registers::edx);
-
- call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- // JSValue is a non-POD type, so eax points to it
- emitLoad(0, regT1, regT0, X86Registers::eax);
-#else
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx); // callee
- move(callFrameRegister, X86Registers::ecx); // callFrame
- call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
-#endif
-
- // We've put a few temporaries on the stack in addition to the actual arguments
- // so pull them off now
- addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-
-#elif CPU(ARM_TRADITIONAL)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
- // Allocate stack space for our arglist
- COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0 && sizeof(JSValue) == 8 && sizeof(Register) == 8, ArgList_should_by_8byte_aligned);
- subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
-
- // Push argcount
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in regT1
- move(callFrameRegister, regT1);
- sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
-
- // push pointer to arguments
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
-
- // Argument passing method:
- // r0 - points to return value
- // r1 - callFrame
- // r2 - callee
- // stack: this(JSValue) and a pointer to ArgList
-
-#if OS(WINCE)
- // Setup arg4:
- push(stackPointerRegister);
-
- // Setup arg3:
- // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
- load32(Address(regT1, -(int32_t)sizeof(void*) * 2), ARMRegisters::r3);
- push(ARMRegisters::r3);
- load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
- storePtr(regT3, Address(stackPointerRegister));
-
- // Setup arg2:
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);
-
- // Setup arg1:
- move(callFrameRegister, regT1);
-
- // Setup arg0:
- move(stackPointerRegister, regT0);
-
- call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- load32(Address(stackPointerRegister, 0), regT0);
- load32(Address(stackPointerRegister, 4), regT1);
-
- addPtr(Imm32(sizeof(ArgList) + 8), stackPointerRegister);
-#else // OS(WINCE)
- move(stackPointerRegister, regT3);
- subPtr(Imm32(8), stackPointerRegister);
- move(stackPointerRegister, regT0);
- subPtr(Imm32(8 + 4 + 4 /* padding */), stackPointerRegister);
-
- // Setup arg4:
- storePtr(regT3, Address(stackPointerRegister, 8));
-
- // Setup arg3:
- // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
- load32(Address(regT1, -(int32_t)sizeof(void*) * 2), regT3);
- storePtr(regT3, Address(stackPointerRegister, 0));
- load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
- storePtr(regT3, Address(stackPointerRegister, 4));
-
- // Setup arg2:
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);
-
- // Setup arg1:
- move(callFrameRegister, regT1);
-
- call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- // Load return value
- load32(Address(stackPointerRegister, 16), regT0);
- load32(Address(stackPointerRegister, 20), regT1);
-
- addPtr(Imm32(sizeof(ArgList) + 16 + 8), stackPointerRegister);
-#endif // OS(WINCE)
-
-#endif
-
- // Check for an exception
- move(ImmPtr(&globalData->exception), regT2);
- Jump sawException = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::EmptyValueTag));
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callLazyLinkCall2 = call();
restoreReturnAddressBeforeReturn(regT3);
- ret();
-
- // Handle an exception
- sawException.link(this);
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- breakpoint();
-#endif
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
-
- // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-#endif
- patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
-#if ENABLE(JIT_OPTIMIZE_CALL)
- patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
- patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
-#endif
-
- CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
-
- trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
-#else
- UNUSED_PARAM(ctiStringLengthTrampoline);
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
- trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
-#else
- UNUSED_PARAM(ctiVirtualCallLink);
-#endif
-#if ENABLE(JIT_OPTIMIZE_MOD)
- trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
-#endif
-}
-
-void JIT::emit_op_mov(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- if (m_codeBlock->isConstantRegisterIndex(src))
- emitStore(dst, getConstantOperand(src));
- else {
- emitLoad(src, regT1, regT0);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
- }
-}
-
-void JIT::emit_op_end(Instruction* currentInstruction)
-{
- if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, cti_op_end).call();
- ASSERT(returnValueRegister != callFrameRegister);
- emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
- ret();
-}
-
-void JIT::emit_op_jmp(Instruction* currentInstruction)
-{
- unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target);
-}
-
-void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- emitTimeoutCheck();
-
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
- return;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT0, regT2), target);
-}
-
-void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_loop_if_lesseq);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_new_object(Instruction* currentInstruction)
-{
- JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_instanceof(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
-
- // Load the operands into registers.
- // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
- emitLoadPayload(value, regT2);
- emitLoadPayload(baseVal, regT0);
- emitLoadPayload(proto, regT1);
-
- // Check that value, baseVal, and proto are cells.
- emitJumpSlowCaseIfNotJSCell(value);
- emitJumpSlowCaseIfNotJSCell(baseVal);
- emitJumpSlowCaseIfNotJSCell(proto);
-
- // Check that baseVal 'ImplementsDefaultHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
-
- // Optimistically load the result true, and start looping.
- // Initially, regT1 still contains proto and regT2 still contains value.
- // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(Imm32(JSValue::TrueTag), regT0);
- Label loop(this);
-
- // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
- // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- Jump isInstance = branchPtr(Equal, regT2, regT1);
- branchTest32(NonZero, regT2).linkTo(loop, this);
-
- // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(Imm32(JSValue::FalseTag), regT0);
-
- // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
- isInstance.link(this);
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, value);
- linkSlowCaseIfNotJSCell(iter, baseVal);
- linkSlowCaseIfNotJSCell(iter, proto);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(value);
- stubCall.addArgument(baseVal);
- stubCall.addArgument(proto);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_new_func(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
- ASSERT(globalObject->isGlobalObject());
- int index = currentInstruction[3].u.operand;
-
- loadPtr(&globalObject->d()->registers, regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
-{
- JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
- ASSERT(globalObject->isGlobalObject());
- int index = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- loadPtr(&globalObject->d()->registers, regT2);
- emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
-}
-
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int index = currentInstruction[2].u.operand;
- int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
- while (skip--)
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
-{
- int index = currentInstruction[1].u.operand;
- int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
- int value = currentInstruction[3].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
- while (skip--)
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
-
- emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
-}
-
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
-}
-
-void JIT::emit_op_tear_off_arguments(Instruction*)
-{
- JITStubCall(this, cti_op_tear_off_arguments).call();
-}
-
-void JIT::emit_op_new_array(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_to_primitive(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- isImm.link(this);
-
- if (dst != src)
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_primitive);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_strcat(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction)
-{
- // FIXME: Optimize to use patching instead of so many memory accesses.
-
- unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = currentInstruction[2].u.jsCell;
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
- void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
- void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
-
- // Verify structure.
- move(ImmPtr(globalObject), regT0);
- loadPtr(structureAddress, regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
-
- // Load property.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
- load32(offsetAddr, regT3);
- load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
- load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = currentInstruction[2].u.jsCell;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(globalObject));
- stubCall.addArgument(ImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
- stubCall.call(dst);
-}
-
-void JIT::emit_op_not(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoadTag(src, regT0);
-
- xor32(Imm32(JSValue::FalseTag), regT0);
- addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
- xor32(Imm32(JSValue::TrueTag), regT0);
-
- emitStoreBool(dst, regT0, (dst == src));
-}
-
-void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_not);
- stubCall.addArgument(src);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_jfalse(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(cond, regT1, regT0);
-
- Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
- addJump(jump(), target);
-
- if (supportsFloatingPoint()) {
- isNotInteger.link(this);
-
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
- } else
- addSlowCase(isNotInteger);
-
- isTrue.link(this);
- isTrue2.link(this);
-}
-
-void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
-}
-
-void JIT::emit_op_jtrue(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(cond, regT1, regT0);
-
- Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
- addJump(jump(), target);
-
- if (supportsFloatingPoint()) {
- isNotInteger.link(this);
-
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
- } else
- addSlowCase(isNotInteger);
-
- isFalse.link(this);
- isFalse2.link(this);
-}
-
-void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_jeq_null(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
-
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
-
- set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
- set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
- or32(regT2, regT1);
-
- addJump(branchTest32(NonZero, regT1), target);
-
- wasNotImmediate.link(this);
-}
-
-void JIT::emit_op_jneq_null(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
-
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
-
- set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
- set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
- or32(regT2, regT1);
-
- addJump(branchTest32(Zero, regT1), target);
-
- wasNotImmediate.link(this);
-}
-
-void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell;
- unsigned target = currentInstruction[3].u.operand;
-
- emitLoad(src, regT1, regT0);
- addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
-}
-
-void JIT::emit_op_jsr(Instruction* currentInstruction)
-{
- int retAddrDst = currentInstruction[1].u.operand;
- int target = currentInstruction[2].u.operand;
- DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
- addJump(jump(), target);
- m_jsrSites.append(JSRInfo(storeLocation, label()));
-}
-
-void JIT::emit_op_sret(Instruction* currentInstruction)
-{
- jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
-}
-
-void JIT::emit_op_eq(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, regT3));
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
- addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
-
- set8(Equal, regT0, regT2, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JumpList storeResult;
- JumpList genericCase;
-
- genericCase.append(getSlowCase(iter)); // tags not equal
-
- linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
-
- // String case.
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
- stubCallEqStrings.addArgument(regT0);
- stubCallEqStrings.addArgument(regT2);
- stubCallEqStrings.call();
- storeResult.append(jump());
-
- // Generic case.
- genericCase.append(getSlowCase(iter)); // doubles
- genericCase.link(this);
- JITStubCall stubCallEq(this, cti_op_eq);
- stubCallEq.addArgument(op1);
- stubCallEq.addArgument(op2);
- stubCallEq.call(regT0);
-
- storeResult.link(this);
- or32(Imm32(JSValue::FalseTag), regT0);
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emit_op_neq(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, regT3));
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
- addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
-
- set8(NotEqual, regT0, regT2, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- JumpList storeResult;
- JumpList genericCase;
-
- genericCase.append(getSlowCase(iter)); // tags not equal
-
- linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
-
- // String case.
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
- stubCallEqStrings.addArgument(regT0);
- stubCallEqStrings.addArgument(regT2);
- stubCallEqStrings.call(regT0);
- storeResult.append(jump());
-
- // Generic case.
- genericCase.append(getSlowCase(iter)); // doubles
- genericCase.link(this);
- JITStubCall stubCallEq(this, cti_op_eq);
- stubCallEq.addArgument(regT1, regT0);
- stubCallEq.addArgument(regT3, regT2);
- stubCallEq.call(regT0);
-
- storeResult.link(this);
- xor32(Imm32(0x1), regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
- emitStoreBool(dst, regT0);
-}
-
-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoadTag(src1, regT0);
- emitLoadTag(src2, regT1);
-
- // Jump to a slow case if either operand is double, or if both operands are
- // cells and/or Int32s.
- move(regT0, regT2);
- and32(regT1, regT2);
- addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
- addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));
-
- if (type == OpStrictEq)
- set8(Equal, regT0, regT1, regT0);
- else
- set8(NotEqual, regT0, regT1, regT0);
-
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emit_op_stricteq(Instruction* currentInstruction)
-{
- compileOpStrictEq(currentInstruction, OpStrictEq);
-}
-
-void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_stricteq);
- stubCall.addArgument(src1);
- stubCall.addArgument(src2);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_nstricteq(Instruction* currentInstruction)
-{
- compileOpStrictEq(currentInstruction, OpNStrictEq);
-}
-
-void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_nstricteq);
- stubCall.addArgument(src1);
- stubCall.addArgument(src2);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_eq_null(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
- set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
- or32(regT2, regT1);
-
- wasNotImmediate.link(this);
-
- or32(Imm32(JSValue::FalseTag), regT1);
-
- emitStoreBool(dst, regT1);
-}
-
-void JIT::emit_op_neq_null(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
- set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
- and32(regT2, regT1);
-
- wasNotImmediate.link(this);
-
- or32(Imm32(JSValue::FalseTag), regT1);
-
- emitStoreBool(dst, regT1);
-}
-
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_throw(Instruction* currentInstruction)
-{
- unsigned exception = currentInstruction[1].u.operand;
- JITStubCall stubCall(this, cti_op_throw);
- stubCall.addArgument(exception);
- stubCall.call();
-
-#ifndef NDEBUG
- // cti_op_throw always changes its return address,
- // this point in the code should never be reached.
- breakpoint();
-#endif
-}
-
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitLoad(base, regT1, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- if (base != m_codeBlock->thisRegister()) {
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
- getPnamesStubCall.addArgument(regT0);
- getPnamesStubCall.call(dst);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store32(Imm32(0), addressFor(i));
- store32(regT3, addressFor(size));
- Jump end = jump();
-
- isNotObject.link(this);
- addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
- addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
- JITStubCall toObjectStubCall(this, cti_to_object);
- toObjectStubCall.addArgument(regT1, regT0);
- toObjectStubCall.call(base);
- jump().linkTo(isObject, this);
-
- end.link(this);
-}
-
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
- Label begin(this);
- load32(addressFor(i), regT0);
- Jump end = branch32(Equal, regT0, addressFor(size));
-
- // Grab key @ i
- loadPtr(addressFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
- load32(BaseIndex(regT2, regT0, TimesEight), regT2);
- store32(Imm32(JSValue::CellTag), tagFor(dst));
- store32(regT2, payloadFor(dst));
-
- // Increment i
- add32(Imm32(1), regT0);
- store32(regT0, addressFor(i));
-
- // Verify that i is valid:
- loadPtr(addressFor(base), regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
-
- // Test base's prototype chain
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(Imm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
-
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- loadPtr(addressFor(dst), regT1);
- JITStubCall stubCall(this, cti_has_property);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
-
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
-
- // End of loop.
- end.link(this);
-}
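
A rough C++ rendering of the validity check the loop above performs before trusting a cached property name (all type names are illustrative): a cached name stays usable only while the base object's structure and every prototype's structure still match the snapshot taken by op_get_pnames; otherwise control falls back to cti_has_property.

    struct StructureSketch { StructureSketch* prototypeStructure; }; // null at chain end
    struct IteratorSketch {
        StructureSketch* cachedStructure;
        StructureSketch** cachedChain;   // zero-terminated, as m_vector above
    };

    bool cachedNameStillValidSketch(StructureSketch* base, IteratorSketch* it)
    {
        if (base != it->cachedStructure)
            return false;                // base reshaped: ask the object
        StructureSketch* s = base;
        for (StructureSketch** cached = it->cachedChain; *cached; ++cached) {
            s = s->prototypeStructure;
            if (!s || s != *cached)
                return false;            // prototype replaced or chain cut short
        }
        return true;
    }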
-
-void JIT::emit_op_push_scope(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_push_scope);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_pop_scope(Instruction*)
-{
- JITStubCall(this, cti_op_pop_scope).call();
-}
-
-void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
- isInt32.link(this);
-
- if (src != dst)
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_jsnumber);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_push_new_scope);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_catch(Instruction* currentInstruction)
-{
- unsigned exception = currentInstruction[1].u.operand;
-
- // This opcode only executes after a return from cti_op_throw.
-
- // cti_op_throw may have taken us to a call frame further up the stack; reload
- // the call frame pointer to adjust.
- peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
-
- // Now store the exception returned by cti_op_throw.
- emitStore(exception, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
-}
-
-void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_switch_imm(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- JITStubCall stubCall(this, cti_op_switch_imm);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
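
In sketch form, this is what the cti_op_switch_imm stub computes with the table set up above; the field names follow SimpleJumpTable, but treat the details as illustrative rather than the stub's actual source.

    #include <cstdint>
    #include <vector>

    struct SimpleJumpTableSketch {
        int32_t min;                       // lowest case value
        std::vector<void*> ctiOffsets;     // machine-code label per case
        void* ctiDefault;                  // machine-code label for `default:`
    };

    // Map the scrutinee to a code address; out-of-range or unhandled values
    // land on the default label. The JIT then does an indirect jump(regT0).
    void* switchImmTargetSketch(const SimpleJumpTableSketch& t, int32_t value)
    {
        int64_t index = static_cast<int64_t>(value) - t.min;
        if (index >= 0 && index < static_cast<int64_t>(t.ctiOffsets.size())
            && t.ctiOffsets[index])
            return t.ctiOffsets[index];
        return t.ctiDefault;
    }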
-
-void JIT::emit_op_switch_char(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- JITStubCall stubCall(this, cti_op_switch_char);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
jump(regT0);
-}
-
-void JIT::emit_op_switch_string(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
-
- JITStubCall stubCall(this, cti_op_switch_string);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_new_error(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned type = currentInstruction[2].u.operand;
- unsigned message = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_new_error);
- stubCall.addArgument(Imm32(type));
- stubCall.addArgument(m_codeBlock->getConstant(message));
- stubCall.addArgument(Imm32(m_bytecodeIndex));
- stubCall.call(dst);
-}
-
-void JIT::emit_op_debug(Instruction* currentInstruction)
-{
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- UNUSED_PARAM(currentInstruction);
- breakpoint();
-#else
- JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call();
-#endif
-}
-
-
-void JIT::emit_op_enter(Instruction*)
-{
- // Even though JIT code doesn't use them, we initialize our constant
- // registers to zap stale pointers, to avoid unnecessarily prolonging
- // object lifetime and increasing GC pressure.
- for (int i = 0; i < m_codeBlock->m_numVars; ++i)
- emitStore(i, jsUndefined());
-}
-
-void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
-{
- emit_op_enter(currentInstruction);
-
- JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_create_arguments(Instruction*)
-{
- Jump argsCreated = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::EmptyValueTag));
-
- // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation.
- if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, cti_op_create_arguments_no_params).call();
- else
- JITStubCall(this, cti_op_create_arguments).call();
-
- argsCreated.link(this);
-}
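
A one-look sketch of the laziness exploited above: the arguments object is materialized at most once per frame, and the EmptyValueTag test is the "already created?" probe. The accessors and helper here are hypothetical; the real code tests the tag word of the frame's ArgumentsRegister slot directly.

    #include <cstdint>

    enum : uint32_t { EmptyValueTag = 0xfffffff9 };   // assumed value
    struct FrameSketch {
        uint32_t argumentsTag() const;                // hypothetical accessors
        void setArguments(void* argumentsObject);
    };
    void* createArgumentsObjectSketch(FrameSketch*);  // stands in for the two stubs

    void createArgumentsIfNeededSketch(FrameSketch* frame)
    {
        if (frame->argumentsTag() != EmptyValueTag)   // the argsCreated branch above
            return;                                   // already materialized
        frame->setArguments(createArgumentsObjectSketch(frame)); // one stub call, ever
    }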
-
-void JIT::emit_op_init_arguments(Instruction*)
-{
- emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
-}
-
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
-{
- unsigned thisRegister = currentInstruction[1].u.operand;
-
- emitLoad(thisRegister, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
-
- map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
-}
-
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned thisRegister = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_convert_this);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(thisRegister);
-}
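
The two slow cases added above correspond to the two early returns in this sketch of `this` conversion; types, tag value, and helpers are illustrative stand-ins.

    #include <cstdint>

    enum : uint32_t { CellTag = 0xfffffffe };            // assumed value
    struct ValueSketch { uint32_t tag; uint32_t payload; };
    bool cellNeedsThisConversion(uint32_t cellBits);     // hypothetical TypeInfo probe
    ValueSketch toThisObjectSketch(ValueSketch);         // stands in for cti_op_convert_this

    ValueSketch convertThisSketch(ValueSketch thisValue)
    {
        if (thisValue.tag != CellTag)                    // slow case 1: primitive
            return toThisObjectSketch(thisValue);
        if (cellNeedsThisConversion(thisValue.payload))  // slow case 2: host object quirk
            return toThisObjectSketch(thisValue);
        return thisValue;                                // fast path: use as-is
    }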
-
-void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
-{
- peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT2));
-
- JITStubCall stubCall(this, cti_op_profile_will_call);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
- noProfiler.link(this);
-}
-
-void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
-{
- peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT2));
-
- JITStubCall stubCall(this, cti_op_profile_did_call);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
- noProfiler.link(this);
-}
-
-#else // USE(JSVALUE32_64)
-
-#define RECORD_JUMP_TARGET(targetOffset) \
- do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
-
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
-{
-#if ENABLE(JIT_OPTIMIZE_MOD)
- Label softModBegin = align();
- softModulo();
-#endif
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (2) The second function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // Check eax is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
-
- // Checks out okay! - get the length from the UString.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
-
- Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign-extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-#endif
-
- // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
- COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-
- // VirtualCallLink Trampoline
+ // VirtualCall Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallLinkBegin = align();
+ Label virtualCallBegin = align();
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
- Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
- Call callJSFunction2 = call();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ Call callJSFunction3 = call();
emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock2.link(this);
-
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock3.link(this);
+
// Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), regT1);
preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
- Call callArityCheck2 = call();
+ Call callArityCheck3 = call();
move(regT1, callFrameRegister);
emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay2.link(this);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ arityCheckOkay3.link(this);
- isNativeFunc2.link(this);
+ isNativeFunc3.link(this);
compileOpCallInitializeCallFrame();
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 1); // return address
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCall)), regT0);
jump(regT0);
- // VirtualCall Trampoline
+ // VirtualConstruct Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallBegin = align();
+ Label virtualConstructBegin = align();
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump isNativeFunc4 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
- Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump hasCodeBlock4 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
- Call callJSFunction1 = call();
+ Call callJSFunction4 = call();
emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock3.link(this);
+ hasCodeBlock4.link(this);
// Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ Jump arityCheckOkay4 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), regT1);
preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 1); // return address
restoreArgumentReference();
- Call callArityCheck1 = call();
+ Call callArityCheck4 = call();
move(regT1, callFrameRegister);
emitGetJITStubArg(2, regT1); // argCount
restoreReturnAddressBeforeReturn(regT3);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- arityCheckOkay3.link(this);
+ arityCheckOkay4.link(this);
- isNativeFunc3.link(this);
+ isNativeFunc4.link(this);
compileOpCallInitializeCallFrame();
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstruct)), regT0);
jump(regT0);
+ // NativeCall Trampoline
Label nativeCallThunk = align();
preserveReturnAddressAfterCall(regT0);
emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
@@ -1929,27 +515,32 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
- patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
#if ENABLE(JIT_OPTIMIZE_CALL)
- patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
- patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callLazyLinkCall1, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_construct_arityCheck));
+ patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_construct_jitCompile));
+ patchBuffer.link(callLazyLinkCall2, FunctionPtr(cti_vm_lazyLinkConstruct));
#endif
+ patchBuffer.link(callArityCheck3, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction3, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callArityCheck4, FunctionPtr(cti_op_construct_arityCheck));
+ patchBuffer.link(callJSFunction4, FunctionPtr(cti_op_construct_jitCompile));
CodeRef finalCode = patchBuffer.finalizeCode();
*executablePool = finalCode.m_executablePool;
trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = trampolineAt(finalCode, virtualConstructLinkBegin);
trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ trampolines->ctiVirtualConstruct = trampolineAt(finalCode, virtualConstructBegin);
trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
#if ENABLE(JIT_OPTIMIZE_MOD)
trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
-#else
- UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
}
@@ -2244,12 +835,10 @@ void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_global(Instruction* currentInstruction)
+void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
{
// Fast case
void* globalObject = currentInstruction[2].u.jsCell;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
-
unsigned currentIndex = m_globalResolveInfoIndex++;
void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
@@ -2257,7 +846,7 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction)
// Check Structure of global object
move(ImmPtr(globalObject), regT0);
loadPtr(structureAddress, regT1);
- Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // Structures don't match
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)))); // Structures don't match
// Load cached property
// Assume that the global object always uses external storage.
@@ -2265,16 +854,22 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction)
load32(offsetAddr, regT1);
loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
- Jump end = jump();
+}
- // Slow case
- noMatch.link(this);
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = currentInstruction[2].u.jsCell;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_resolve_global);
stubCall.addArgument(ImmPtr(globalObject));
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
- stubCall.call(currentInstruction[1].u.operand);
- end.link(this);
+ stubCall.call(dst);
}
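
For orientation, a sketch of the per-site cache this fast/slow split maintains: each op_resolve_global owns a GlobalResolveInfo {structure, offset}, and a structure match means the global object's shape is unchanged since the cache was filled. The types and lookup helper below are illustrative, not the stub's source.

    #include <cstddef>
    #include <cstdint>

    struct ValueSketch { int32_t tag; int32_t payload; };
    struct GlobalResolveInfoSketch { void* structure; size_t offset; };
    struct GlobalSketch {
        void* structure;                  // current Structure of the global object
        ValueSketch* externalStorage;     // property storage, as assumed above
        bool findProperty(const char* ident, size_t& offset); // hypothetical
    };

    ValueSketch resolveGlobalSketch(GlobalSketch* g, GlobalResolveInfoSketch& cache,
                                    const char* ident)
    {
        if (g->structure == cache.structure)           // the inlined fast path
            return g->externalStorage[cache.offset];
        size_t offset;                                 // cti_op_resolve_global:
        if (!g->findProperty(ident, offset))
            throw "ReferenceError";                    // sketch-level error path
        cache.structure = g->structure;                // refill so the next run hits
        cache.offset = offset;
        return g->externalStorage[offset];
    }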
void JIT::emit_op_not(Instruction* currentInstruction)
@@ -2299,7 +894,8 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
isNonZero.link(this);
RECORD_JUMP_TARGET(target);
-};
+}
+
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
unsigned src = currentInstruction[1].u.operand;
@@ -2888,24 +1484,6 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base array check
- linkSlowCase(iter); // vector length check
- linkSlowCase(iter); // empty value
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.call(dst);
-}
-
void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned op2 = currentInstruction[2].u.operand;
@@ -3090,7 +1668,50 @@ void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCa
stubCall.call(currentInstruction[1].u.operand);
}
-#endif // USE(JSVALUE32_64)
+#endif // !USE(JSVALUE32_64)
+
+void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[6].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ while (skip--) {
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ }
+ emit_op_resolve_global(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = currentInstruction[2].u.jsCell;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+ int skip = currentInstruction[6].u.operand + m_codeBlock->needsFullScopeChain();
+ while (skip--)
+ linkSlowCase(iter);
+ JITStubCall resolveStubCall(this, cti_op_resolve);
+ resolveStubCall.addArgument(ImmPtr(ident));
+ resolveStubCall.call(dst);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(ImmPtr(globalObject));
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.call(dst);
+}
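
The shape of op_resolve_global_dynamic, sketched: skipping a scope node is sound only while that node is still a pristine activation (the checkStructure guard above); any mismatch, say from an eval-introduced variable, bails out to a full cti_op_resolve. All names here are illustrative.

    struct ScopeNodeSketch { void* objectStructure; ScopeNodeSketch* next; };
    struct ValueSketch { int tag; int payload; };
    ValueSketch fullResolveSketch(const char* ident);          // cti_op_resolve
    ValueSketch cachedGlobalResolveSketch(const char* ident);  // the cached path above

    ValueSketch resolveGlobalDynamicSketch(ScopeNodeSketch* scope, int skip,
                                           void* activationStructure,
                                           const char* ident)
    {
        while (skip--) {
            if (scope->objectStructure != activationStructure)
                return fullResolveSketch(ident);   // dynamic scope: cannot skip it
            scope = scope->next;
        }
        return cachedGlobalResolveSketch(ident);   // then the usual global cache
    }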
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_regexp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
// For both JSValue32_64 and JSValue32
#if ENABLE(JIT_OPTIMIZE_MOD)
diff --git a/JavaScriptCore/jit/JITOpcodes32_64.cpp b/JavaScriptCore/jit/JITOpcodes32_64.cpp
new file mode 100644
index 0000000..d4edb92
--- /dev/null
+++ b/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -0,0 +1,1603 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT) && USE(JSVALUE32_64)
+
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
+{
+#if ENABLE(JIT_OPTIMIZE_MOD)
+ Label softModBegin = align();
+ softModulo();
+#endif
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // (1) This function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // regT0 holds payload, regT1 holds tag
+
+ Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+
+ // Checks out okay! - get the length from the UString.
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
+
+ Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
+ move(regT2, regT0);
+ move(Imm32(JSValue::Int32Tag), regT1);
+
+ ret();
+#endif
+
+ // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ // VirtualCallLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallLinkBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump isNativeFunc1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
+
+ Jump hasCodeBlock1 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction1 = call();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ hasCodeBlock1.link(this);
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callArityCheck1 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ arityCheckOkay1.link(this);
+
+ isNativeFunc1.link(this);
+
+ compileOpCallInitializeCallFrame();
+
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callLazyLinkCall1 = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+
+ // VirtualConstructLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructLinkBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
+
+ Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction2 = call();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ hasCodeBlock2.link(this);
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callArityCheck2 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ arityCheckOkay2.link(this);
+
+ isNativeFunc2.link(this);
+
+ compileOpCallInitializeCallFrame();
+
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callLazyLinkCall2 = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
+
+ // VirtualCall Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
+
+ Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction3 = call();
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock3.link(this);
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callArityCheck3 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ arityCheckOkay3.link(this);
+
+ isNativeFunc3.link(this);
+
+ compileOpCallInitializeCallFrame();
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCall)), regT0);
+ jump(regT0);
+
+ // VirtualConstruct Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump isNativeFunc4 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
+
+ Jump hasCodeBlock4 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction4 = call();
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock4.link(this);
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay4 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callArityCheck4 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ arityCheckOkay4.link(this);
+
+ isNativeFunc4.link(this);
+
+ compileOpCallInitializeCallFrame();
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstruct)), regT0);
+ jump(regT0);
+
+#if CPU(X86) || CPU(ARM_TRADITIONAL)
+ Label nativeCallThunk = align();
+ preserveReturnAddressAfterCall(regT0);
+ emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
+
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+#if CPU(X86)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ /* We have two structs that we use to describe the stack frame we set up for our
+ * call to native code. NativeCallFrameStructure describes how we set up the stack
+ * in advance of the call. NativeFunctionCalleeSignature describes the call frame
+ * as the native code expects it. We do this because we use the fastcall calling
+ * convention, which has the callee pop its arguments off the stack but not the
+ * rest of the call frame, so we need a reliable way to increment the stack
+ * pointer by the right amount after the call.
+ */
+
+#if COMPILER(MSVC) || OS(LINUX)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in EDX
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ JSValue result;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#else
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in ECX
+ // JSObject* callee; // passed in EDX
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#endif
+
+ const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
+ // Allocate system stack frame
+ subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
+
+ // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
+ storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+#if COMPILER(MSVC) || OS(LINUX)
+ // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
+
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
+ storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+
+ // Plant callframe
+ move(callFrameRegister, X86Registers::edx);
+
+ call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ // JSValue is a non-POD type, so eax points to it
+ emitLoad(0, regT1, regT0, X86Registers::eax);
+#else
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx); // callee
+ move(callFrameRegister, X86Registers::ecx); // callFrame
+ call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+#endif
+
+ // We've put a few temporaries on the stack in addition to the actual arguments
+ // so pull them off now
+ addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+
+#elif CPU(ARM_TRADITIONAL)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ // Allocate stack space for our arglist
+ COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0 && sizeof(JSValue) == 8 && sizeof(Register) == 8, ArgList_should_be_8byte_aligned);
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ move(callFrameRegister, regT1);
+ sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+
+ // push pointer to arguments
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // Argument passing method:
+ // r0 - points to return value
+ // r1 - callFrame
+ // r2 - callee
+ // stack: this(JSValue) and a pointer to ArgList
+
+#if OS(WINCE)
+ // Setup arg4:
+ push(stackPointerRegister);
+
+ // Setup arg3:
+ // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
+ load32(Address(regT1, -(int32_t)sizeof(void*) * 2), ARMRegisters::r3);
+ push(ARMRegisters::r3);
+ load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
+ storePtr(regT3, Address(stackPointerRegister));
+
+ // Setup arg2:
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);
+
+ // Setup arg1:
+ move(callFrameRegister, regT1);
+
+ // Setup arg0:
+ move(stackPointerRegister, regT0);
+
+ call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ load32(Address(stackPointerRegister, 0), regT0);
+ load32(Address(stackPointerRegister, 4), regT1);
+
+ addPtr(Imm32(sizeof(ArgList) + 8), stackPointerRegister);
+#else // OS(WINCE)
+ move(stackPointerRegister, regT3);
+ subPtr(Imm32(8), stackPointerRegister);
+ move(stackPointerRegister, regT0);
+ subPtr(Imm32(8 + 4 + 4 /* padding */), stackPointerRegister);
+
+ // Setup arg4:
+ storePtr(regT3, Address(stackPointerRegister, 8));
+
+ // Setup arg3:
+ // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
+ load32(Address(regT1, -(int32_t)sizeof(void*) * 2), regT3);
+ storePtr(regT3, Address(stackPointerRegister, 0));
+ load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
+ storePtr(regT3, Address(stackPointerRegister, 4));
+
+ // Setup arg2:
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);
+
+ // Setup arg1:
+ move(callFrameRegister, regT1);
+
+ call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ // Load return value
+ load32(Address(stackPointerRegister, 16), regT0);
+ load32(Address(stackPointerRegister, 20), regT1);
+
+ addPtr(Imm32(sizeof(ArgList) + 16 + 8), stackPointerRegister);
+#endif // OS(WINCE)
+
+#endif
+
+ // Check for an exception
+ move(ImmPtr(&globalData->exception), regT2);
+ Jump sawException = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::EmptyValueTag));
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT3);
+ ret();
+
+ // Handle an exception
+ sawException.link(this);
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+ // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callLazyLinkCall1, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_construct_arityCheck));
+ patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_construct_jitCompile));
+ patchBuffer.link(callLazyLinkCall2, FunctionPtr(cti_vm_lazyLinkConstruct));
+#endif
+ patchBuffer.link(callArityCheck3, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction3, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callArityCheck4, FunctionPtr(cti_op_construct_arityCheck));
+ patchBuffer.link(callJSFunction4, FunctionPtr(cti_op_construct_jitCompile));
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ trampolines->ctiVirtualConstruct = trampolineAt(finalCode, virtualConstructBegin);
+ trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = trampolineAt(finalCode, virtualConstructLinkBegin);
+#endif
+#if ENABLE(JIT_OPTIMIZE_MOD)
+ trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
+#endif
+}
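
Everything in this new file leans on the split-word value layout; as background, a sketch of it follows. The union is illustrative and assumes a little-endian layout, but the field roles mirror the u.asBits accesses used throughout.

    #include <cstdint>

    union EncodedValueSketch {
        double asDouble;        // doubles occupy both words; their high word
                                // never collides with the tag range
        struct {
            int32_t payload;    // int32 value, boolean, or JSCell* bits
            int32_t tag;        // CellTag, Int32Tag, TrueTag, ... (high word)
        } asBits;
    };

    // This layout is why values move in pairs of 32-bit loads/stores here:
    //   load32(BaseIndex(base, index, TimesEight),    payloadReg);  // payload
    //   load32(BaseIndex(base, index, TimesEight, 4), tagReg);      // tag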
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src))
+ emitStore(dst, getConstantOperand(src));
+ else {
+ emitLoad(src, regT1, regT0);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_end).call();
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target);
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitTimeoutCheck();
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, regT2), target);
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ // Load the operands into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitLoadPayload(value, regT2);
+ emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(proto, regT1);
+
+ // Check that value, baseVal, and proto are cells.
+ emitJumpSlowCaseIfNotJSCell(value);
+ emitJumpSlowCaseIfNotJSCell(baseVal);
+ emitJumpSlowCaseIfNotJSCell(proto);
+
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(Imm32(JSValue::TrueTag), regT0);
+ Label loop(this);
+
+ // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ branchTest32(NonZero, regT2).linkTo(loop, this);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ move(Imm32(JSValue::FalseTag), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_instanceof);
+ stubCall.addArgument(value);
+ stubCall.addArgument(baseVal);
+ stubCall.addArgument(proto);
+ stubCall.call(dst);
+}
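
A C++ rendering of the loop emitted above (types illustrative): walk value's prototype chain until we meet proto or reach a null prototype, in the same compare-then-test order as the generated code.

    struct CellSketch;
    struct StructureSketch {
        unsigned prototypeTag;      // m_prototype's tag word
        CellSketch* prototypeCell;  // m_prototype's payload word
    };
    struct CellSketch { StructureSketch* structure; };

    bool instanceOfSketch(CellSketch* value, CellSketch* proto)
    {
        for (CellSketch* c = value;;) {
            c = c->structure->prototypeCell;  // payload of m_prototype
            if (c == proto)
                return true;                  // the isInstance jump
            if (!c)
                return false;                 // null payload: end of the chain
        }
    }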
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[3].u.operand;
+
+ loadPtr(&globalObject->d()->registers, regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ loadPtr(&globalObject->d()->registers, regT2);
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
+}
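
Both scoped-variable ops share the walk above; sketched below with illustrative types mirroring ScopeChainNode and JSVariableObject (the d->registers indirection is collapsed into one field here).

    struct ValueSketch { int tag; int payload; };
    struct VariableObjectSketch { ValueSketch* registers; };  // stands in for d->registers
    struct ScopeNodeSketch {
        VariableObjectSketch* object;
        ScopeNodeSketch* next;
    };

    // Hop `skip` links (both ops fold the needsFullScopeChain adjustment
    // into skip up front), then index that node's register file.
    ValueSketch* scopedVarSlotSketch(ScopeNodeSketch* node, int skip, int index)
    {
        while (skip--)
            node = node->next;
        return &node->object->registers[index];
    }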
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction*)
+{
+ JITStubCall(this, cti_op_tear_off_arguments).call();
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_primitive);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_skip);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
+{
+ // FIXME: Optimize to use patching instead of so many memory accesses.
+
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = currentInstruction[2].u.jsCell;
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
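+    // Each resolve site owns a GlobalResolveInfo caching the global object's Structure
+    // and the property's storage offset; the slow path fills these in on first lookup.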
+
+ // Verify structure.
+ move(ImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
+
+ // Load property.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
+ load32(offsetAddr, regT3);
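+    // External storage is an array of 8-byte JSValues: payload word at offset 0, tag word at offset 4.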
+ load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
+ load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
+ emitStore(dst, regT1, regT0);
+    map(m_bytecodeIndex + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = currentInstruction[2].u.jsCell;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(ImmPtr(globalObject));
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoadTag(src, regT0);
+
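+    // FalseTag and TrueTag differ only in the low bit, so tag ^ FalseTag is 0 or 1 for
+    // booleans; any other bit set means the operand was not a boolean at all.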
+ xor32(Imm32(JSValue::FalseTag), regT0);
+ addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
+ xor32(Imm32(JSValue::TrueTag), regT0);
+
+ emitStoreBool(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_not);
+ stubCall.addArgument(src);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
+ addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
+ addJump(jump(), target);
+
+ if (supportsFloatingPoint()) {
+ isNotInteger.link(this);
+
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
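+        // 0.0, -0.0 and NaN are the only falsy doubles; DoubleEqualOrUnordered covers all three.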
+ addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
+ } else
+ addSlowCase(isNotInteger);
+
+ isTrue.link(this);
+ isTrue2.link(this);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
+ addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
+ addJump(jump(), target);
+
+ if (supportsFloatingPoint()) {
+ isNotInteger.link(this);
+
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
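+        // DoubleNotEqual is an ordered compare, so a NaN condition (falsy) falls through without jumping.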
+ addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
+ } else
+ addSlowCase(isNotInteger);
+
+ isFalse.link(this);
+ isFalse2.link(this);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ addJump(branchTest32(NonZero, regT1), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ addJump(branchTest32(Zero, regT1), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+
+ set8(Equal, regT0, regT2, regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call();
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(op1);
+ stubCallEq.addArgument(op2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ or32(Imm32(JSValue::FalseTag), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+
+ set8(NotEqual, regT0, regT2, regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call(regT0);
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(regT1, regT0);
+ stubCallEq.addArgument(regT3, regT2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ xor32(Imm32(0x1), regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoadTag(src1, regT0);
+ emitLoadTag(src2, regT1);
+
+ // Jump to a slow case if either operand is double, or if both operands are
+ // cells and/or Int32s.
+ move(regT0, regT2);
+ and32(regT1, regT2);
+ addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));
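+    // The fast path compares tags only, so any pair in which both operands are Int32s
+    // and/or cells is sent to the slow case, where payloads can be compared.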
+
+ if (type == OpStrictEq)
+ set8(Equal, regT0, regT1, regT0);
+ else
+ set8(NotEqual, regT0, regT1, regT0);
+
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_stricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_nstricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ or32(Imm32(JSValue::FalseTag), regT1);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
+ set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ and32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ or32(Imm32(JSValue::FalseTag), regT1);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+ JITStubCall stubCall(this, cti_op_throw);
+ stubCall.addArgument(exception);
+ stubCall.call();
+
+#ifndef NDEBUG
+    // cti_op_throw always changes its return address,
+    // so this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitLoad(base, regT1, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ if (base != m_codeBlock->thisRegister()) {
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
+ getPnamesStubCall.addArgument(regT0);
+ getPnamesStubCall.call(dst);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ store32(Imm32(0), addressFor(i));
+ store32(regT3, addressFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
+ addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
+ JITStubCall toObjectStubCall(this, cti_to_object);
+ toObjectStubCall.addArgument(regT1, regT0);
+ toObjectStubCall.call(base);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(addressFor(i), regT0);
+ Jump end = branch32(Equal, regT0, addressFor(size));
+
+ // Grab key @ i
+ loadPtr(addressFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+ load32(BaseIndex(regT2, regT0, TimesEight), regT2);
+ store32(Imm32(JSValue::CellTag), tagFor(dst));
+ store32(regT2, payloadFor(dst));
+
+ // Increment i
+ add32(Imm32(1), regT0);
+ store32(regT0, addressFor(i));
+
+ // Verify that i is valid:
+ loadPtr(addressFor(base), regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+
+ // Test base's prototype chain
+    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain)), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
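+    // Walk the live prototype chain in lockstep with the cached StructureChain; any
+    // mismatch means a prototype was mutated, so fall back to asking the object.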
+
+ Label checkPrototype(this);
+ callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(Imm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ loadPtr(addressFor(dst), regT1);
+ JITStubCall stubCall(this, cti_has_property);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
+ isInt32.link(this);
+
+ if (src != dst)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_new_scope);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+
+ // This opcode only executes after a return from cti_op_throw.
+
+ // cti_op_throw may have taken us to a call frame further up the stack; reload
+ // the call frame pointer to adjust.
+ peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ // Now store the exception returned by cti_op_throw.
+ emitStore(exception, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_imm);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_char);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+
+ JITStubCall stubCall(this, cti_op_switch_string);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_new_error(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned type = currentInstruction[2].u.operand;
+ unsigned message = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_new_error);
+ stubCall.addArgument(Imm32(type));
+ stubCall.addArgument(m_codeBlock->getConstant(message));
+ stubCall.addArgument(Imm32(m_bytecodeIndex));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
+ JITStubCall stubCall(this, cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call();
+#endif
+}
+
+void JIT::emit_op_enter(Instruction*)
+{
+    // Even though JIT code doesn't read them before writing, we initialize the
+    // local variable registers to zap stale pointers, to avoid unnecessarily
+    // prolonging object lifetime and increasing GC pressure.
+ for (int i = 0; i < m_codeBlock->m_numVars; ++i)
+ emitStore(i, jsUndefined());
+}
+
+void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+{
+ emit_op_enter(currentInstruction);
+
+ JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_create_arguments(Instruction*)
+{
+ Jump argsCreated = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::EmptyValueTag));
+
+    // If we get here the arguments register holds the empty value - i.e. the arguments object needs lazy creation.
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_arguments(Instruction*)
+{
+ emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ emitLoad(thisRegister, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
+}
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_convert_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(thisRegister);
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+}
+
+#endif // ENABLE(JIT) && USE(JSVALUE32_64)
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index 3399f03..399afdd 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -52,6 +52,37 @@ using namespace std;
namespace JSC {
+PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
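+    // A non-zero fiber count marks a rope, which has no flat character buffer to index.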
+ failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+
+    // Load string length to regT2, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
+
+ // Load the character
+ jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
+
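+    // Only characters below 0x100 have cached single-character strings; others take the failure path.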
+ failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
+ jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.ret();
+
+ failures.link(&jit);
+ jit.move(Imm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(&jit, pool);
+ return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+}
+
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -83,6 +114,34 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ Jump nonCell = jump();
+ linkSlowCase(iter); // base array check
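+    // Base is a cell but not an array; if it is a string, try the string thunk before the generic stub.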
+ Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+ emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCodeForCall().addressForCall());
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
{
ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
diff --git a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index ec33026..792583b 100644
--- a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -269,6 +269,38 @@ void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator
#endif
+PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
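+    // Bail out to the failure path for ropes (non-zero fiber count); they have no contiguous data.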
+ failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+
+ // Load string length to regT1, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
+
+ // Load the character
+ jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
+
+ failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
+ jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.move(Imm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
+ jit.ret();
+
+ failures.link(&jit);
+ jit.move(Imm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(&jit, pool);
+ return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+}
+
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -300,7 +332,18 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+
+ Jump nonCell = jump();
linkSlowCase(iter); // base array check
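+    // Base is a cell but not an array; strings get a shot at the inline thunk first.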
+ Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+ emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCodeForCall().addressForCall());
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitStore(dst, regT1, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index ebd26bb..3d1c272 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -1777,7 +1777,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
return stackFrame.args[0].function()->make(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
}
-DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
+DEFINE_STUB_FUNCTION(void*, op_call_jitCompile)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1790,7 +1790,25 @@ DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
ASSERT(!function->isHostFunction());
FunctionExecutable* executable = function->jsExecutable();
ScopeChainNode* callDataScopeChain = function->scope().node();
- executable->jitCode(stackFrame.callFrame, callDataScopeChain);
+ executable->jitCodeForCall(stackFrame.callFrame, callDataScopeChain);
+
+ return function;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+#if !ASSERT_DISABLED
+ CallData callData;
+ ASSERT(stackFrame.args[0].jsValue().getCallData(callData) == CallTypeJS);
+#endif
+
+ JSFunction* function = asFunction(stackFrame.args[0].jsValue());
+ ASSERT(!function->isHostFunction());
+ FunctionExecutable* executable = function->jsExecutable();
+ ScopeChainNode* callDataScopeChain = function->scope().node();
+ executable->jitCodeForConstruct(stackFrame.callFrame, callDataScopeChain);
return function;
}
@@ -1802,7 +1820,54 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
CallFrame* callFrame = stackFrame.callFrame;
JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
ASSERT(!callee->isHostFunction());
- CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecode();
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeForCall();
+ int argCount = stackFrame.args[2].int32();
+
+ ASSERT(argCount != newCodeBlock->m_numParameters);
+
+ CallFrame* oldCallFrame = callFrame->callerFrame();
+
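+    // Too many arguments: move the frame up past the extras and copy the declared
+    // parameters into the slots where the callee expects to find them.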
+ if (argCount > newCodeBlock->m_numParameters) {
+ size_t numParameters = newCodeBlock->m_numParameters;
+ Register* r = callFrame->registers() + numParameters;
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
+ for (size_t i = 0; i < numParameters; ++i)
+ argv[i + argCount] = argv[i];
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ } else {
+ size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
+ Register* r = callFrame->registers() + omittedArgCount;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ stackFrame.callFrame = oldCallFrame;
+ throwStackOverflowError(oldCallFrame, stackFrame.globalData, stackFrame.args[1].returnAddress(), STUB_RETURN_ADDRESS);
+ RETURN_POINTER_PAIR(0, 0);
+ }
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
+ for (size_t i = 0; i < omittedArgCount; ++i)
+ argv[i] = jsUndefined();
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ }
+
+ RETURN_POINTER_PAIR(callee, callFrame);
+}
+
+DEFINE_STUB_FUNCTION(VoidPtrPair, op_construct_arityCheck)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+ ASSERT(!callee->isHostFunction());
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeForConstruct();
int argCount = stackFrame.args[2].int32();
ASSERT(argCount != newCodeBlock->m_numParameters);
@@ -1848,11 +1913,11 @@ DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
STUB_INIT_STACK_FRAME(stackFrame);
JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
ExecutableBase* executable = callee->executable();
- JITCode& jitCode = executable->generatedJITCode();
+ JITCode& jitCode = executable->generatedJITCodeForCall();
CodeBlock* codeBlock = 0;
if (!executable->isHostFunction())
- codeBlock = &static_cast<FunctionExecutable*>(executable)->bytecode(stackFrame.callFrame, callee->scope().node());
+ codeBlock = &static_cast<FunctionExecutable*>(executable)->bytecodeForCall(stackFrame.callFrame, callee->scope().node());
CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
if (!callLinkInfo->seenOnce())
@@ -1862,6 +1927,26 @@ DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
return jitCode.addressForCall().executableAddress();
}
+
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkConstruct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+ ExecutableBase* executable = callee->executable();
+ JITCode& jitCode = executable->generatedJITCodeForConstruct();
+
+ CodeBlock* codeBlock = 0;
+ if (!executable->isHostFunction())
+ codeBlock = &static_cast<FunctionExecutable*>(executable)->bytecodeForConstruct(stackFrame.callFrame, callee->scope().node());
+ CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
+
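+    // Link the call site only on its second visit; the first merely marks it as seen.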
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
+ else
+ JIT::linkConstruct(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32(), stackFrame.globalData);
+
+ return jitCode.addressForCall().executableAddress();
+}
#endif // !ENABLE(JIT_OPTIMIZE_CALL)
DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
@@ -2087,31 +2172,36 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
JSValue baseValue = stackFrame.args[0].jsValue();
JSValue subscript = stackFrame.args[1].jsValue();
- JSValue result;
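+    // Fast path: a cell base with a string subscript tries a direct own-property lookup first.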
+ if (LIKELY(baseValue.isCell() && subscript.isString())) {
+ Identifier propertyName(callFrame, asString(subscript)->value(callFrame));
+ PropertySlot slot(asCell(baseValue));
+ if (asCell(baseValue)->fastGetOwnPropertySlot(callFrame, propertyName, slot)) {
+ JSValue result = slot.getValue(callFrame, propertyName);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+ }
- if (LIKELY(subscript.isUInt32())) {
+ if (subscript.isUInt32()) {
uint32_t i = subscript.asUInt32();
- if (isJSArray(globalData, baseValue)) {
- JSArray* jsArray = asArray(baseValue);
- if (jsArray->canGetIndex(i))
- result = jsArray->getIndex(i);
- else
- result = jsArray->JSArray::get(callFrame, i);
- } else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
- // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
- result = asString(baseValue)->getIndex(callFrame, i);
- } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSValue result = asString(baseValue)->getIndex(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
- } else
- result = baseValue.get(callFrame, i);
- } else {
- Identifier property(callFrame, subscript.toString(callFrame));
- result = baseValue.get(callFrame, property);
+ }
+ JSValue result = baseValue.get(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
}
-
+
+ Identifier property(callFrame, subscript.toString(callFrame));
+ JSValue result = baseValue.get(callFrame, property);
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2865,6 +2955,13 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
return JSValue::encode(result);
}
+DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
+}
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
{
STUB_INIT_STACK_FRAME(stackFrame);
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index fe4bcfb..841950f 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -78,7 +78,9 @@ namespace JSC {
struct TrampolineStructure {
MacroAssemblerCodePtr ctiStringLengthTrampoline;
MacroAssemblerCodePtr ctiVirtualCallLink;
+ MacroAssemblerCodePtr ctiVirtualConstructLink;
MacroAssemblerCodePtr ctiVirtualCall;
+ MacroAssemblerCodePtr ctiVirtualConstruct;
RefPtr<NativeExecutable> ctiNativeCallThunk;
MacroAssemblerCodePtr ctiSoftModulo;
};
@@ -274,7 +276,9 @@ namespace JSC {
MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_trampolineStructure.ctiStringLengthTrampoline; }
MacroAssemblerCodePtr ctiVirtualCallLink() { return m_trampolineStructure.ctiVirtualCallLink; }
+ MacroAssemblerCodePtr ctiVirtualConstructLink() { return m_trampolineStructure.ctiVirtualConstructLink; }
MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
+ MacroAssemblerCodePtr ctiVirtualConstruct() { return m_trampolineStructure.ctiVirtualConstruct; }
NativeExecutable* ctiNativeCallThunk() { return m_trampolineStructure.ctiNativeCallThunk.get(); }
MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
@@ -337,6 +341,7 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_global_dynamic(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
@@ -356,12 +361,14 @@ extern "C" {
JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
+ VoidPtrPair JIT_STUB cti_op_construct_arityCheck(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
@@ -391,11 +398,13 @@ extern "C" {
void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
void JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_jitCompile(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_construct_jitCompile(STUB_ARGS_DECLARATION);
void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION);
} // extern "C"
} // namespace JSC
diff --git a/JavaScriptCore/jit/JSInterfaceJIT.h b/JavaScriptCore/jit/JSInterfaceJIT.h
index 2cd0e33..12a6cfa 100644
--- a/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -152,7 +152,7 @@ namespace JSC {
static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
- static const FPRegisterID fpRegT2 = MIPSRegisters::f10;
+ static const FPRegisterID fpRegT3 = MIPSRegisters::f10;
#else
#error "JIT not supported on this platform."
#endif
@@ -169,6 +169,7 @@ namespace JSC {
#if USE(JSVALUE32) || USE(JSVALUE64)
Jump emitJumpIfImmediateNumber(RegisterID reg);
Jump emitJumpIfNotImmediateNumber(RegisterID reg);
+ void emitFastArithImmToInt(RegisterID reg);
#endif
inline Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
@@ -260,7 +261,11 @@ namespace JSC {
done.link(this);
return notNumber;
}
-
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID)
+ {
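+        // Under JSVALUE64 the low 32 bits of the register already hold the int32 payload, so nothing to do.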
+ }
+
#endif
#if USE(JSVALUE32)
@@ -283,6 +288,12 @@ namespace JSC {
ASSERT_NOT_REACHED();
return jump();
}
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID reg)
+ {
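+        // JSVALUE32 immediates keep the int32 shifted up past the tag bit; shift it back down.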
+ rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);
+ }
+
#endif
#if !USE(JSVALUE32_64)
diff --git a/JavaScriptCore/jit/SpecializedThunkJIT.h b/JavaScriptCore/jit/SpecializedThunkJIT.h
index e41411d..69925a9 100644
--- a/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -130,7 +130,7 @@ namespace JSC {
PassRefPtr<NativeExecutable> finalize()
{
LinkBuffer patchBuffer(this, m_pool.get());
- patchBuffer.link(m_failures, CodeLocationLabel(m_globalData->jitStubs.ctiNativeCallThunk()->generatedJITCode().addressForCall()));
+ patchBuffer.link(m_failures, CodeLocationLabel(m_globalData->jitStubs.ctiNativeCallThunk()->generatedJITCodeForCall().addressForCall()));
return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
}