Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h    24
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                 1070
-rw-r--r--  JavaScriptCore/jit/JIT.h                    147
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp        367
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp              108
-rw-r--r--  JavaScriptCore/jit/JITCode.h                 97
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h        59
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp    373
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp            2196
-rw-r--r--  JavaScriptCore/jit/JITStubs.h               226
10 files changed, 3597 insertions, 1070 deletions
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index 1541256..0cb78ad 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -40,6 +40,18 @@
namespace JSC {
+inline size_t roundUpAllocationSize(size_t request, size_t granularity)
+{
+ if ((std::numeric_limits<size_t>::max() - granularity) <= request)
+ CRASH(); // Allocation is too large
+
+ // Round up to next page boundary
+ size_t size = request + (granularity - 1);
+ size = size & ~(granularity - 1);
+ ASSERT(size >= request);
+ return size;
+}
+
class ExecutablePool : public RefCounted<ExecutablePool> {
private:
struct Allocation {
@@ -86,18 +98,6 @@ private:
static Allocation systemAlloc(size_t n);
static void systemRelease(const Allocation& alloc);
- inline size_t roundUpAllocationSize(size_t request, size_t granularity)
- {
- if ((std::numeric_limits<size_t>::max() - granularity) <= request)
- CRASH(); // Allocation is too large
-
- // Round up to next page boundary
- size_t size = request + (granularity - 1);
- size = size & ~(granularity - 1);
- ASSERT(size >= request);
- return size;
- }
-
ExecutablePool(size_t n);
void* poolAllocate(size_t n);
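
The helper hoisted out of ExecutablePool above is the standard power-of-two round-up with an overflow guard; a stand-alone sketch of the same idiom (illustrative, not WebKit code):

    #include <cassert>
    #include <cstddef>
    #include <limits>

    // Same idiom as roundUpAllocationSize above; granularity must be a
    // power of two so that (granularity - 1) is a mask of the low bits.
    size_t roundUp(size_t request, size_t granularity)
    {
        assert((granularity & (granularity - 1)) == 0);
        if (std::numeric_limits<size_t>::max() - granularity <= request)
            return 0; // the real helper calls CRASH() here
        size_t size = (request + granularity - 1) & ~(granularity - 1);
        assert(size >= request);
        return size;
    }
    // e.g. roundUp(5000, 4096) == 8192, roundUp(4096, 4096) == 4096.
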
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index 5640c8a..e6113fc 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -79,14 +79,14 @@ asm(
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
- "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPvz) "\n"
+ "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPvz) "\n"
#else
#if USE(JIT_STUB_ARGUMENT_REGISTER)
"movl %esp, %ecx" "\n"
#else // JIT_STUB_ARGUMENT_STACK
"movl %esp, 0(%esp)" "\n"
#endif
- "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv) "\n"
+ "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPPv) "\n"
#endif
"addl $0x1c, %esp" "\n"
"popl %ebx" "\n"
@@ -138,7 +138,7 @@ asm(
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
#if USE(JIT_STUB_ARGUMENT_REGISTER)
"movq %rsp, %rdi" "\n"
- "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv) "\n"
+ "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPPv) "\n"
#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
#error "JIT_STUB_ARGUMENT configuration not supported."
#endif
@@ -186,7 +186,7 @@ extern "C" {
#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
#error "JIT_STUB_ARGUMENT configuration not supported."
#endif
- call JSC::Interpreter::cti_vm_throw;
+ call JSC::JITStubs::cti_vm_throw;
add esp, 0x1c;
pop ebx;
pop edi;
@@ -200,14 +200,19 @@ extern "C" {
#endif
-void ctiSetReturnAddress(void** where, void* what)
+void ctiSetReturnAddress(void** addressOfReturnAddress, void* newDestinationToReturnTo)
{
- *where = what;
+ *addressOfReturnAddress = newDestinationToReturnTo;
}
-void ctiPatchCallByReturnAddress(void* where, void* what)
+void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction)
{
- MacroAssembler::Jump::patch(where, what);
+ returnAddress.relinkCallerToFunction(newCalleeFunction);
+}
+
+void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction)
+{
+ returnAddress.relinkNearCallerToFunction(newCalleeFunction);
}
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
@@ -228,62 +233,28 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
unsigned src1 = currentInstruction[2].u.operand;
unsigned src2 = currentInstruction[3].u.operand;
- emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);
+ emitGetVirtualRegisters(src1, regT0, src2, regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
// Jump to a slow case if either operand is a number, or if both are JSCell*s.
- move(X86::eax, X86::ecx);
- orPtr(X86::edx, X86::ecx);
- addSlowCase(emitJumpIfJSCell(X86::ecx));
- addSlowCase(emitJumpIfImmediateNumber(X86::ecx));
+ move(regT0, regT2);
+ orPtr(regT1, regT2);
+ addSlowCase(emitJumpIfJSCell(regT2));
+ addSlowCase(emitJumpIfImmediateNumber(regT2));
if (type == OpStrictEq)
- sete32(X86::edx, X86::eax);
+ set32(Equal, regT1, regT0, regT0);
else
- setne32(X86::edx, X86::eax);
- emitTagAsBoolImmediate(X86::eax);
-#else
- bool negated = (type == OpNStrictEq);
-
- // Check that both are immediates, if so check if they're equal
- Jump firstNotImmediate = emitJumpIfJSCell(X86::eax);
- Jump secondNotImmediate = emitJumpIfJSCell(X86::edx);
- Jump bothWereImmediatesButNotEqual = jnePtr(X86::edx, X86::eax);
-
- // They are equal - set the result to true. (Or false, if negated).
- move(ImmPtr(JSValuePtr::encode(jsBoolean(!negated))), X86::eax);
- Jump bothWereImmediatesAndEqual = jump();
-
- // eax was not an immediate, we haven't yet checked edx.
- // If edx is also a JSCell, or is 0, then jump to a slow case,
- // otherwise these values are not equal.
- firstNotImmediate.link(this);
- emitJumpSlowCaseIfJSCell(X86::edx);
- addSlowCase(jePtr(X86::edx, ImmPtr(JSValuePtr::encode(js0()))));
- Jump firstWasNotImmediate = jump();
-
- // eax was an immediate, but edx wasn't.
- // If eax is 0 jump to a slow case, otherwise these values are not equal.
- secondNotImmediate.link(this);
- addSlowCase(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0()))));
-
- // We get here if the two values are different immediates, or one is 0 and the other is a JSCell.
- // Values are not equal, set the result to false.
- bothWereImmediatesButNotEqual.link(this);
- firstWasNotImmediate.link(this);
- move(ImmPtr(JSValuePtr::encode(jsBoolean(negated))), X86::eax);
-
- bothWereImmediatesAndEqual.link(this);
-#endif
+ set32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
-void JIT::emitSlowScriptCheck()
+void JIT::emitTimeoutCheck()
{
- Jump skipTimeout = jnzSub32(Imm32(1), timeoutCheckRegister);
- emitCTICall(Interpreter::cti_timeout_check);
- move(X86::eax, timeoutCheckRegister);
+ Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
+ emitCTICall(JITStubs::cti_timeout_check);
+ move(regT0, timeoutCheckRegister);
skipTimeout.link(this);
killLastResultRegister();
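
In C++ terms the emitted check behaves roughly like this sketch (stub name and tick budget are placeholders):

    #include <cstdint>

    // Placeholder for JITStubs::cti_timeout_check: the real stub consults
    // the clock and returns a fresh tick budget.
    static uint32_t cti_timeout_check_stub() { return 10000; }

    // Compiled at loop back-edges: decrement a dedicated register and only
    // take the expensive call when the local budget runs out.
    inline void timeoutCheck(uint32_t& timeoutCheckRegister)
    {
        if (--timeoutCheckRegister == 0)
            timeoutCheckRegister = cti_timeout_check_stub();
    }
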
@@ -296,21 +267,24 @@ void JIT::emitSlowScriptCheck()
#define CTI_COMPILE_BINARY_OP(name) \
case name: { \
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); \
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx); \
- emitCTICall(Interpreter::cti_##name); \
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); \
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2); \
+ emitCTICall(JITStubs::cti_##name); \
emitPutVirtualRegister(currentInstruction[1].u.operand); \
NEXT_OPCODE(name); \
}
#define CTI_COMPILE_UNARY_OP(name) \
case name: { \
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); \
- emitCTICall(Interpreter::cti_##name); \
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); \
+ emitCTICall(JITStubs::cti_##name); \
emitPutVirtualRegister(currentInstruction[1].u.operand); \
NEXT_OPCODE(name); \
}
+#define RECORD_JUMP_TARGET(targetOffset) \
+ do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+
void JIT::privateCompileMainPass()
{
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
@@ -328,13 +302,31 @@ void JIT::privateCompileMainPass()
sampleInstruction(currentInstruction);
#endif
+ if (m_labels[m_bytecodeIndex].isUsed())
+ killLastResultRegister();
+
m_labels[m_bytecodeIndex] = label();
OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);
switch (opcodeID) {
case op_mov: {
- emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ int src = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ storePtr(ImmPtr(JSValuePtr::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ if (dst == m_lastResultBytecodeRegister)
+ killLastResultRegister();
+ } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
+ // If either the src or dst is the cached register, go through
+ // get/put registers to make sure we track this correctly.
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
+ } else {
+ // Perform the copy via regT1; do not disturb any mapping in regT0.
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
+ storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+ }
NEXT_OPCODE(op_mov);
}
case op_add: {
@@ -343,8 +335,9 @@ void JIT::privateCompileMainPass()
}
case op_end: {
if (m_codeBlock->needsFullScopeChain())
- emitCTICall(Interpreter::cti_op_end);
- emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ emitCTICall(JITStubs::cti_op_end);
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
push(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
ret();
NEXT_OPCODE(op_end);
@@ -352,6 +345,7 @@ void JIT::privateCompileMainPass()
case op_jmp: {
unsigned target = currentInstruction[1].u.operand;
addJump(jump(), target + 1);
+ RECORD_JUMP_TARGET(target + 1);
NEXT_OPCODE(op_jmp);
}
case op_pre_inc: {
@@ -359,60 +353,60 @@ void JIT::privateCompileMainPass()
NEXT_OPCODE(op_pre_inc);
}
case op_loop: {
- emitSlowScriptCheck();
+ emitTimeoutCheck();
unsigned target = currentInstruction[1].u.operand;
addJump(jump(), target + 1);
NEXT_OPCODE(op_end);
}
case op_loop_if_less: {
- emitSlowScriptCheck();
+ emitTimeoutCheck();
unsigned op1 = currentInstruction[1].u.operand;
unsigned op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
- addJump(jl32(X86::eax, Imm32(op2imm)), target + 3);
+ addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
} else {
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
- addJump(jl32(X86::eax, X86::edx), target + 3);
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ addJump(branch32(LessThan, regT0, regT1), target + 3);
}
NEXT_OPCODE(op_loop_if_less);
}
case op_loop_if_lesseq: {
- emitSlowScriptCheck();
+ emitTimeoutCheck();
unsigned op1 = currentInstruction[1].u.operand;
unsigned op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
- addJump(jle32(X86::eax, Imm32(op2imm)), target + 3);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
} else {
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
- addJump(jle32(X86::eax, X86::edx), target + 3);
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
}
NEXT_OPCODE(op_loop_if_less);
}
case op_new_object: {
- emitCTICall(Interpreter::cti_op_new_object);
+ emitCTICall(JITStubs::cti_op_new_object);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_new_object);
}
@@ -425,49 +419,49 @@ void JIT::privateCompileMainPass()
NEXT_OPCODE(op_get_by_id);
}
case op_instanceof: {
- emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax); // value
- emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx); // baseVal
- emitGetVirtualRegister(currentInstruction[4].u.operand, X86::edx); // proto
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); // value
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); // baseVal
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // proto
// check if any are immediates
- move(X86::eax, X86::ebx);
- orPtr(X86::ecx, X86::ebx);
- orPtr(X86::edx, X86::ebx);
- emitJumpSlowCaseIfNotJSCell(X86::ebx);
+ move(regT0, regT3);
+ orPtr(regT2, regT3);
+ orPtr(regT1, regT3);
+ emitJumpSlowCaseIfNotJSCell(regT3);
// check that all are object type - this is a bit of a bithack to avoid excess branching;
// we check that the sum of the three type codes from Structures is exactly 3 * ObjectType;
// this works because NumberType and StringType are smaller
- move(Imm32(3 * ObjectType), X86::ebx);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::eax);
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- loadPtr(Address(X86::edx, FIELD_OFFSET(JSCell, m_structure)), X86::edx);
- sub32(Address(X86::eax, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx);
- sub32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx);
- addSlowCase(jne32(Address(X86::edx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx));
+ move(Imm32(3 * ObjectType), regT3);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT1);
+ sub32(Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3);
+ sub32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3);
+ addSlowCase(branch32(NotEqual, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3));
// check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
- load32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), X86::ecx);
- and32(Imm32(ImplementsHasInstance | OverridesHasInstance), X86::ecx);
- addSlowCase(jne32(X86::ecx, Imm32(ImplementsHasInstance)));
+ load32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), regT2);
+ and32(Imm32(ImplementsHasInstance | OverridesHasInstance), regT2);
+ addSlowCase(branch32(NotEqual, regT2, Imm32(ImplementsHasInstance)));
- emitGetVirtualRegister(currentInstruction[2].u.operand, X86::ecx); // reload value
- emitGetVirtualRegister(currentInstruction[4].u.operand, X86::edx); // reload proto
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT2); // reload value
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // reload proto
// optimistically load true result
- move(ImmPtr(JSValuePtr::encode(jsBoolean(true))), X86::eax);
+ move(ImmPtr(JSValuePtr::encode(jsBoolean(true))), regT0);
Label loop(this);
// load value's prototype
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
- Jump exit = jePtr(X86::ecx, X86::edx);
+ Jump exit = branchPtr(Equal, regT2, regT1);
- jnePtr(X86::ecx, ImmPtr(JSValuePtr::encode(jsNull())), loop);
+ branchPtr(NotEqual, regT2, ImmPtr(JSValuePtr::encode(jsNull())), loop);
- move(ImmPtr(JSValuePtr::encode(jsBoolean(false))), X86::eax);
+ move(ImmPtr(JSValuePtr::encode(jsBoolean(false))), regT0);
exit.link(this);
@@ -476,10 +470,10 @@ void JIT::privateCompileMainPass()
NEXT_OPCODE(op_instanceof);
}
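
The type-code summation above trades three compares for one. A self-contained illustration with made-up enum values (the real codes live in JSType; all that matters is that ObjectType is the largest of the three):

    #include <cassert>

    enum Type { NumberType = 1, StringType = 2, ObjectType = 3 }; // illustrative

    // Because ObjectType is the largest code, three codes can sum to
    // 3 * ObjectType only when all three are ObjectType.
    bool allThreeAreObjects(Type a, Type b, Type c)
    {
        return a + b + c == 3 * ObjectType;
    }

    int main()
    {
        assert(allThreeAreObjects(ObjectType, ObjectType, ObjectType));
        assert(!allThreeAreObjects(ObjectType, StringType, ObjectType));
    }
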
case op_del_by_id: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitPutJITStubArgConstant(ident, 2);
- emitCTICall(Interpreter::cti_op_del_by_id);
+ emitCTICall(JITStubs::cti_op_del_by_id);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_del_by_id);
}
@@ -490,7 +484,7 @@ void JIT::privateCompileMainPass()
case op_new_func: {
FuncDeclNode* func = m_codeBlock->function(currentInstruction[2].u.operand);
emitPutJITStubArgConstant(func, 1);
- emitCTICall(Interpreter::cti_op_new_func);
+ emitCTICall(JITStubs::cti_op_new_func);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_new_func);
}
@@ -508,67 +502,71 @@ void JIT::privateCompileMainPass()
}
case op_get_global_var: {
JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
- move(ImmPtr(globalObject), X86::eax);
- emitGetVariableObjectRegister(X86::eax, currentInstruction[3].u.operand, X86::eax);
+ move(ImmPtr(globalObject), regT0);
+ emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_get_global_var);
}
case op_put_global_var: {
- emitGetVirtualRegister(currentInstruction[3].u.operand, X86::edx);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
- move(ImmPtr(globalObject), X86::eax);
- emitPutVariableObjectRegister(X86::edx, X86::eax, currentInstruction[2].u.operand);
+ move(ImmPtr(globalObject), regT0);
+ emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
NEXT_OPCODE(op_put_global_var);
}
case op_get_scoped_var: {
int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
- emitGetFromCallFrameHeader(RegisterFile::ScopeChain, X86::eax);
+ emitGetFromCallFrameHeader(RegisterFile::ScopeChain, regT0);
while (skip--)
- loadPtr(Address(X86::eax, FIELD_OFFSET(ScopeChainNode, next)), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0);
- loadPtr(Address(X86::eax, FIELD_OFFSET(ScopeChainNode, object)), X86::eax);
- emitGetVariableObjectRegister(X86::eax, currentInstruction[2].u.operand, X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0);
+ emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_get_scoped_var);
}
case op_put_scoped_var: {
int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
- emitGetFromCallFrameHeader(RegisterFile::ScopeChain, X86::edx);
- emitGetVirtualRegister(currentInstruction[3].u.operand, X86::eax);
+ emitGetFromCallFrameHeader(RegisterFile::ScopeChain, regT1);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
while (skip--)
- loadPtr(Address(X86::edx, FIELD_OFFSET(ScopeChainNode, next)), X86::edx);
+ loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1);
- loadPtr(Address(X86::edx, FIELD_OFFSET(ScopeChainNode, object)), X86::edx);
- emitPutVariableObjectRegister(X86::eax, X86::edx, currentInstruction[1].u.operand);
+ loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1);
+ emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
NEXT_OPCODE(op_put_scoped_var);
}
case op_tear_off_activation: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
- emitCTICall(Interpreter::cti_op_tear_off_activation);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
+ emitCTICall(JITStubs::cti_op_tear_off_activation);
NEXT_OPCODE(op_tear_off_activation);
}
case op_tear_off_arguments: {
- emitCTICall(Interpreter::cti_op_tear_off_arguments);
+ emitCTICall(JITStubs::cti_op_tear_off_arguments);
NEXT_OPCODE(op_tear_off_arguments);
}
case op_ret: {
// We could JIT generate the deref, only calling out to C when the refcount hits zero.
if (m_codeBlock->needsFullScopeChain())
- emitCTICall(Interpreter::cti_op_ret_scopeChain);
+ emitCTICall(JITStubs::cti_op_ret_scopeChain);
+
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
// Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
// Grab the return address.
- emitGetFromCallFrameHeader(RegisterFile::ReturnPC, X86::edx);
+ emitGetFromCallFrameHeader(RegisterFile::ReturnPC, regT1);
// Restore our caller's "r".
emitGetFromCallFrameHeader(RegisterFile::CallerFrame, callFrameRegister);
// Return.
- push(X86::edx);
+ push(regT1);
ret();
NEXT_OPCODE(op_ret);
@@ -576,29 +574,29 @@ void JIT::privateCompileMainPass()
case op_new_array: {
emitPutJITStubArgConstant(currentInstruction[2].u.operand, 1);
emitPutJITStubArgConstant(currentInstruction[3].u.operand, 2);
- emitCTICall(Interpreter::cti_op_new_array);
+ emitCTICall(JITStubs::cti_op_new_array);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_new_array);
}
case op_resolve: {
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
emitPutJITStubArgConstant(ident, 1);
- emitCTICall(Interpreter::cti_op_resolve);
+ emitCTICall(JITStubs::cti_op_resolve);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_resolve);
}
case op_construct_verify: {
- emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- emitJumpSlowCaseIfNotJSCell(X86::eax);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- addSlowCase(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
NEXT_OPCODE(op_construct_verify);
}
case op_get_by_val: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
// This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
// We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
@@ -606,27 +604,27 @@ void JIT::privateCompileMainPass()
// size is always less than 4Gb). As such zero extending will have been correct (and extending the value
// to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
// extending since it makes it easier to re-tag the value in the slow case.
- zeroExtend32ToPtr(X86::edx, X86::edx);
+ zeroExtend32ToPtr(regT1, regT1);
#else
- emitFastArithImmToInt(X86::edx);
+ emitFastArithImmToInt(regT1);
#endif
- emitJumpSlowCaseIfNotJSCell(X86::eax);
- addSlowCase(jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr)));
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
// This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
- addSlowCase(jae32(X86::edx, Address(X86::eax, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));
+ loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));
// Get the value from the vector
- loadPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), X86::eax);
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_get_by_val);
}
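
The comment above about zero-extending the index compresses two range checks into one. A sketch of the reasoning, with illustrative names:

    #include <cstdint>

    // A negative int32 zero-extends to a value >= 2^31. Since the cutoff is
    // bounded by the array length, and the total allocation size stays under
    // 4Gb (so the cutoff sits well below 2^31), a single unsigned compare
    // rejects both negative indices and indices at or past the cutoff.
    bool indexInFastRegion(int32_t index, uint32_t fastAccessCutoff)
    {
        return static_cast<uint32_t>(index) < fastAccessCutoff;
    }
    // indexInFastRegion(-1, 10) == false; indexInFastRegion(3, 10) == true.
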
case op_resolve_func: {
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitPutJITStubArgConstant(ident, 1);
- emitCTICall(Interpreter::cti_op_resolve_func);
- emitPutVirtualRegister(currentInstruction[2].u.operand, X86::edx);
+ emitCTICall(JITStubs::cti_op_resolve_func);
+ emitPutVirtualRegister(currentInstruction[2].u.operand, regT1);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_resolve_func);
}
@@ -635,45 +633,45 @@ void JIT::privateCompileMainPass()
NEXT_OPCODE(op_sub);
}
case op_put_by_val: {
- emitGetVirtualRegisters(currentInstruction[1].u.operand, X86::eax, currentInstruction[2].u.operand, X86::edx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
+ emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
// See comment in op_get_by_val.
- zeroExtend32ToPtr(X86::edx, X86::edx);
+ zeroExtend32ToPtr(regT1, regT1);
#else
- emitFastArithImmToInt(X86::edx);
+ emitFastArithImmToInt(regT1);
#endif
- emitJumpSlowCaseIfNotJSCell(X86::eax);
- addSlowCase(jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr)));
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
// This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
- Jump inFastVector = jb32(X86::edx, Address(X86::eax, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
+ loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
+ Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
// No; oh well, check if the access is within the vector - if so, we may still be okay.
- addSlowCase(jae32(X86::edx, Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_vectorLength))));
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength))));
// This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
// FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
- addSlowCase(jzPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));
+ addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));
// All good - put the value into the array.
inFastVector.link(this);
- emitGetVirtualRegister(currentInstruction[3].u.operand, X86::eax);
- storePtr(X86::eax, BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
NEXT_OPCODE(op_put_by_val);
}
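
The stores above carve array writes into three cases. A simplified model of the decision (names assumed, not the real ArrayStorage layout):

    #include <cstdint>
    #include <vector>

    enum PutPath { FastVector, SlowVectorExistingSlot, SlowCase };

    // Below the cutoff every slot is known to be initialized; between the
    // cutoff and the vector length a slot may still be an empty hole that
    // only the C++ stub can fill (it must bump the item count / cutoff).
    PutPath classify(uint32_t index, uint32_t fastAccessCutoff,
                     const std::vector<void*>& vector)
    {
        if (index < fastAccessCutoff)
            return FastVector;               // store directly
        if (index >= vector.size())
            return SlowCase;                 // outside the vector entirely
        if (!vector[index])
            return SlowCase;                 // first write to this location
        return SlowVectorExistingSlot;       // overwrite in place
    }
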
CTI_COMPILE_BINARY_OP(op_lesseq)
case op_loop_if_true: {
- emitSlowScriptCheck();
+ emitTimeoutCheck();
unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump isZero = jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0())));
- addJump(emitJumpIfImmediateInteger(X86::eax), target + 2);
+ Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0())));
+ addJump(emitJumpIfImmediateInteger(regT0), target + 2);
- addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2);
- addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
isZero.link(this);
NEXT_OPCODE(op_loop_if_true);
@@ -681,13 +679,13 @@ void JIT::privateCompileMainPass()
case op_resolve_base: {
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
emitPutJITStubArgConstant(ident, 1);
- emitCTICall(Interpreter::cti_op_resolve_base);
+ emitCTICall(JITStubs::cti_op_resolve_base);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_resolve_base);
}
case op_negate: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
- emitCTICall(Interpreter::cti_op_negate);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
+ emitCTICall(JITStubs::cti_op_negate);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_negate);
}
@@ -695,7 +693,7 @@ void JIT::privateCompileMainPass()
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
emitPutJITStubArgConstant(ident, 1);
emitPutJITStubArgConstant(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain(), 2);
- emitCTICall(Interpreter::cti_op_resolve_skip);
+ emitCTICall(JITStubs::cti_op_resolve_skip);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_resolve_skip);
}
@@ -709,14 +707,14 @@ void JIT::privateCompileMainPass()
void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
// Check Structure of global object
- move(ImmPtr(globalObject), X86::eax);
- loadPtr(structureAddress, X86::edx);
- Jump noMatch = jnePtr(X86::edx, Address(X86::eax, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
+ move(ImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
// Load cached property
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSGlobalObject, m_propertyStorage)), X86::eax);
- load32(offsetAddr, X86::edx);
- loadPtr(BaseIndex(X86::eax, X86::edx, ScalePtr), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_propertyStorage)), regT0);
+ load32(offsetAddr, regT1);
+ loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
Jump end = jump();
@@ -725,7 +723,7 @@ void JIT::privateCompileMainPass()
emitPutJITStubArgConstant(globalObject, 1);
emitPutJITStubArgConstant(ident, 2);
emitPutJITStubArgConstant(currentIndex, 3);
- emitCTICall(Interpreter::cti_op_resolve_global);
+ emitCTICall(JITStubs::cti_op_resolve_global);
emitPutVirtualRegister(currentInstruction[1].u.operand);
end.link(this);
NEXT_OPCODE(op_resolve_global);
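
The structure check plus cached-offset load above is an inline cache. Roughly, in C++ (field and function names assumed for illustration):

    // Shape of the inline cache compiled above for op_resolve_global.
    struct Structure;
    struct GlobalObject { Structure* structure; void** propertyStorage; };

    void* resolveGlobal(GlobalObject* global,
                        Structure* cachedStructure, int cachedOffset,
                        void* (*slowPath)(GlobalObject*))
    {
        if (global->structure == cachedStructure)
            return global->propertyStorage[cachedOffset]; // hit: one compare + load
        return slowPath(global); // miss: cti_op_resolve_global re-resolves and
                                 // patches cachedStructure/cachedOffset
    }
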
@@ -740,81 +738,85 @@ void JIT::privateCompileMainPass()
unsigned op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
- addJump(jge32(X86::eax, Imm32(op2imm)), target + 3);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
} else {
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
- addJump(jge32(X86::eax, X86::edx), target + 3);
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
}
+ RECORD_JUMP_TARGET(target + 3);
NEXT_OPCODE(op_jnless);
}
case op_not: {
- emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax);
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), X86::eax);
- addSlowCase(jnzPtr(X86::eax, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), X86::eax);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+ addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_not);
}
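
The xor/test/xor sequence above is a branch-light boolean NOT on tagged immediates. A sketch with made-up encoding constants (the real ones are in JSImmediate):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t FullTagTypeBool = 0x4;     // illustrative, not WebKit's
    constexpr uint32_t PayloadBitBoolValue = 0x1; // illustrative

    // After xor'ing away the bool tag, any bit set outside the payload bit
    // means the value was not a boolean; a second xor re-tags and inverts.
    bool jitStyleNot(uint32_t encoded, uint32_t& result)
    {
        uint32_t v = encoded ^ FullTagTypeBool;   // strip the bool tag
        if (v & ~PayloadBitBoolValue)
            return false;                         // slow case: not a boolean
        result = v ^ (FullTagTypeBool | PayloadBitBoolValue);
        return true;
    }

    int main()
    {
        uint32_t encTrue = FullTagTypeBool | PayloadBitBoolValue;
        uint32_t encFalse = FullTagTypeBool;
        uint32_t out;
        assert(jitStyleNot(encTrue, out) && out == encFalse);
        assert(jitStyleNot(encFalse, out) && out == encTrue);
        assert(!jitStyleNot(0x123, out)); // non-boolean takes the slow case
    }
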
case op_jfalse: {
unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0()))), target + 2);
- Jump isNonZero = emitJumpIfImmediateInteger(X86::eax);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0()))), target + 2);
+ Jump isNonZero = emitJumpIfImmediateInteger(regT0);
- addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false)))), target + 2);
- addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true)))));
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true)))));
isNonZero.link(this);
+ RECORD_JUMP_TARGET(target + 2);
NEXT_OPCODE(op_jfalse);
};
case op_jeq_null: {
unsigned src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(src, X86::eax);
- Jump isImmediate = emitJumpIfNotJSCell(X86::eax);
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- addJump(jnz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax);
- addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2);
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2);
wasNotImmediate.link(this);
+ RECORD_JUMP_TARGET(target + 2);
NEXT_OPCODE(op_jeq_null);
};
case op_jneq_null: {
unsigned src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(src, X86::eax);
- Jump isImmediate = emitJumpIfNotJSCell(X86::eax);
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- addJump(jz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax);
- addJump(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2);
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2);
wasNotImmediate.link(this);
+ RECORD_JUMP_TARGET(target + 2);
NEXT_OPCODE(op_jneq_null);
}
case op_post_inc: {
@@ -823,7 +825,7 @@ void JIT::privateCompileMainPass()
}
case op_unexpected_load: {
JSValuePtr v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand);
- move(ImmPtr(JSValuePtr::encode(v)), X86::eax);
+ move(ImmPtr(JSValuePtr::encode(v)), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_unexpected_load);
}
@@ -833,17 +835,20 @@ void JIT::privateCompileMainPass()
DataLabelPtr storeLocation = storePtrWithPatch(Address(callFrameRegister, sizeof(Register) * retAddrDst));
addJump(jump(), target + 2);
m_jsrSites.append(JSRInfo(storeLocation, label()));
+ killLastResultRegister();
+ RECORD_JUMP_TARGET(target + 2);
NEXT_OPCODE(op_jsr);
}
case op_sret: {
jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+ killLastResultRegister();
NEXT_OPCODE(op_sret);
}
case op_eq: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
- emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
- sete32(X86::edx, X86::eax);
- emitTagAsBoolImmediate(X86::eax);
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ set32(Equal, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_eq);
}
@@ -860,13 +865,13 @@ void JIT::privateCompileMainPass()
NEXT_OPCODE(op_rshift);
}
case op_bitnot: {
- emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
- not32(X86::eax);
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ not32(regT0);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
#else
- xorPtr(Imm32(~JSImmediate::TagTypeNumber), X86::eax);
+ xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
#endif
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_bitnot);
@@ -874,15 +879,15 @@ void JIT::privateCompileMainPass()
case op_resolve_with_base: {
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitPutJITStubArgConstant(ident, 1);
- emitCTICall(Interpreter::cti_op_resolve_with_base);
- emitPutVirtualRegister(currentInstruction[2].u.operand, X86::edx);
+ emitCTICall(JITStubs::cti_op_resolve_with_base);
+ emitPutVirtualRegister(currentInstruction[2].u.operand, regT1);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_resolve_with_base);
}
case op_new_func_exp: {
FuncExprNode* func = m_codeBlock->functionExpression(currentInstruction[2].u.operand);
emitPutJITStubArgConstant(func, 1);
- emitCTICall(Interpreter::cti_op_new_func_exp);
+ emitCTICall(JITStubs::cti_op_new_func_exp);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_new_func_exp);
}
@@ -892,23 +897,24 @@ void JIT::privateCompileMainPass()
}
case op_jtrue: {
unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump isZero = jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0())));
- addJump(emitJumpIfImmediateInteger(X86::eax), target + 2);
+ Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0())));
+ addJump(emitJumpIfImmediateInteger(regT0), target + 2);
- addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2);
- addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
isZero.link(this);
+ RECORD_JUMP_TARGET(target + 2);
NEXT_OPCODE(op_jtrue);
}
CTI_COMPILE_BINARY_OP(op_less)
case op_neq: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
- emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
- setne32(X86::edx, X86::eax);
- emitTagAsBoolImmediate(X86::eax);
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ set32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
@@ -920,30 +926,31 @@ void JIT::privateCompileMainPass()
}
CTI_COMPILE_BINARY_OP(op_urshift)
case op_bitxor: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
- emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
- xorPtr(X86::edx, X86::eax);
- emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ xorPtr(regT1, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_bitxor);
}
case op_new_regexp: {
RegExp* regExp = m_codeBlock->regexp(currentInstruction[2].u.operand);
emitPutJITStubArgConstant(regExp, 1);
- emitCTICall(Interpreter::cti_op_new_regexp);
+ emitCTICall(JITStubs::cti_op_new_regexp);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_new_regexp);
}
case op_bitor: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
- emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
- orPtr(X86::edx, X86::eax);
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ orPtr(regT1, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_bitor);
}
case op_throw: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
- emitCTICall(Interpreter::cti_op_throw);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
+ emitCTICall(JITStubs::cti_op_throw);
+ ASSERT(regT0 == returnValueRegister);
#if PLATFORM(X86_64)
addPtr(Imm32(0x48), X86::esp);
pop(X86::ebx);
@@ -964,29 +971,29 @@ void JIT::privateCompileMainPass()
NEXT_OPCODE(op_throw);
}
case op_get_pnames: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
- emitCTICall(Interpreter::cti_op_get_pnames);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
+ emitCTICall(JITStubs::cti_op_get_pnames);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_get_pnames);
}
case op_next_pname: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
unsigned target = currentInstruction[3].u.operand;
- emitCTICall(Interpreter::cti_op_next_pname);
- Jump endOfIter = jzPtr(X86::eax);
+ emitCTICall(JITStubs::cti_op_next_pname);
+ Jump endOfIter = branchTestPtr(Zero, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
addJump(jump(), target + 3);
endOfIter.link(this);
NEXT_OPCODE(op_next_pname);
}
case op_push_scope: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
- emitCTICall(Interpreter::cti_op_push_scope);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
+ emitCTICall(JITStubs::cti_op_push_scope);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_push_scope);
}
case op_pop_scope: {
- emitCTICall(Interpreter::cti_op_pop_scope);
+ emitCTICall(JITStubs::cti_op_pop_scope);
NEXT_OPCODE(op_pop_scope);
}
CTI_COMPILE_UNARY_OP(op_typeof)
@@ -1006,13 +1013,13 @@ void JIT::privateCompileMainPass()
}
case op_to_jsnumber: {
int srcVReg = currentInstruction[2].u.operand;
- emitGetVirtualRegister(srcVReg, X86::eax);
+ emitGetVirtualRegister(srcVReg, regT0);
- Jump wasImmediate = emitJumpIfImmediateInteger(X86::eax);
+ Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
- emitJumpSlowCaseIfNotJSCell(X86::eax, srcVReg);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- addSlowCase(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+ emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
wasImmediate.link(this);
@@ -1023,8 +1030,8 @@ void JIT::privateCompileMainPass()
case op_push_new_scope: {
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
emitPutJITStubArgConstant(ident, 1);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_push_new_scope);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2);
+ emitCTICall(JITStubs::cti_op_push_new_scope);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_push_new_scope);
}
@@ -1036,16 +1043,17 @@ void JIT::privateCompileMainPass()
case op_jmp_scopes: {
unsigned count = currentInstruction[1].u.operand;
emitPutJITStubArgConstant(count, 1);
- emitCTICall(Interpreter::cti_op_jmp_scopes);
+ emitCTICall(JITStubs::cti_op_jmp_scopes);
unsigned target = currentInstruction[2].u.operand;
addJump(jump(), target + 2);
+ RECORD_JUMP_TARGET(target + 2);
NEXT_OPCODE(op_jmp_scopes);
}
case op_put_by_index: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
emitPutJITStubArgConstant(currentInstruction[2].u.operand, 2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx);
- emitCTICall(Interpreter::cti_op_put_by_index);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2);
+ emitCTICall(JITStubs::cti_op_put_by_index);
NEXT_OPCODE(op_put_by_index);
}
case op_switch_imm: {
@@ -1058,10 +1066,10 @@ void JIT::privateCompileMainPass()
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
- emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2);
emitPutJITStubArgConstant(tableIndex, 2);
- emitCTICall(Interpreter::cti_op_switch_imm);
- jump(X86::eax);
+ emitCTICall(JITStubs::cti_op_switch_imm);
+ jump(regT0);
NEXT_OPCODE(op_switch_imm);
}
case op_switch_char: {
@@ -1074,10 +1082,10 @@ void JIT::privateCompileMainPass()
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
- emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2);
emitPutJITStubArgConstant(tableIndex, 2);
- emitCTICall(Interpreter::cti_op_switch_char);
- jump(X86::eax);
+ emitCTICall(JITStubs::cti_op_switch_char);
+ jump(regT0);
NEXT_OPCODE(op_switch_char);
}
case op_switch_string: {
@@ -1089,33 +1097,33 @@ void JIT::privateCompileMainPass()
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
- emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2);
emitPutJITStubArgConstant(tableIndex, 2);
- emitCTICall(Interpreter::cti_op_switch_string);
- jump(X86::eax);
+ emitCTICall(JITStubs::cti_op_switch_string);
+ jump(regT0);
NEXT_OPCODE(op_switch_string);
}
case op_del_by_val: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_del_by_val);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2);
+ emitCTICall(JITStubs::cti_op_del_by_val);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_del_by_val);
}
case op_put_getter: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx);
- emitCTICall(Interpreter::cti_op_put_getter);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2);
+ emitCTICall(JITStubs::cti_op_put_getter);
NEXT_OPCODE(op_put_getter);
}
case op_put_setter: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx);
- emitCTICall(Interpreter::cti_op_put_setter);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2);
+ emitCTICall(JITStubs::cti_op_put_setter);
NEXT_OPCODE(op_put_setter);
}
case op_new_error: {
@@ -1123,7 +1131,7 @@ void JIT::privateCompileMainPass()
emitPutJITStubArgConstant(currentInstruction[2].u.operand, 1);
emitPutJITStubArgConstant(JSValuePtr::encode(message), 2);
emitPutJITStubArgConstant(m_bytecodeIndex, 3);
- emitCTICall(Interpreter::cti_op_new_error);
+ emitCTICall(JITStubs::cti_op_new_error);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_new_error);
}
@@ -1131,29 +1139,29 @@ void JIT::privateCompileMainPass()
emitPutJITStubArgConstant(currentInstruction[1].u.operand, 1);
emitPutJITStubArgConstant(currentInstruction[2].u.operand, 2);
emitPutJITStubArgConstant(currentInstruction[3].u.operand, 3);
- emitCTICall(Interpreter::cti_op_debug);
+ emitCTICall(JITStubs::cti_op_debug);
NEXT_OPCODE(op_debug);
}
case op_eq_null: {
unsigned dst = currentInstruction[1].u.operand;
unsigned src1 = currentInstruction[2].u.operand;
- emitGetVirtualRegister(src1, X86::eax);
- Jump isImmediate = emitJumpIfNotJSCell(X86::eax);
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- setnz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
Jump wasNotImmediate = jump();
isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax);
- sete32(Imm32(JSImmediate::FullTagTypeNull), X86::eax);
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
wasNotImmediate.link(this);
- emitTagAsBoolImmediate(X86::eax);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
NEXT_OPCODE(op_eq_null);
@@ -1162,22 +1170,22 @@ void JIT::privateCompileMainPass()
unsigned dst = currentInstruction[1].u.operand;
unsigned src1 = currentInstruction[2].u.operand;
- emitGetVirtualRegister(src1, X86::eax);
- Jump isImmediate = emitJumpIfNotJSCell(X86::eax);
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- setz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
Jump wasNotImmediate = jump();
isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax);
- setne32(Imm32(JSImmediate::FullTagTypeNull), X86::eax);
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
wasNotImmediate.link(this);
- emitTagAsBoolImmediate(X86::eax);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
NEXT_OPCODE(op_neq_null);
@@ -1200,41 +1208,41 @@ void JIT::privateCompileMainPass()
for (size_t j = 0; j < count; ++j)
emitInitRegister(j);
- emitCTICall(Interpreter::cti_op_push_activation);
+ emitCTICall(JITStubs::cti_op_push_activation);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_enter_with_activation);
}
case op_create_arguments: {
if (m_codeBlock->m_numParameters == 1)
- emitCTICall(Interpreter::cti_op_create_arguments_no_params);
+ emitCTICall(JITStubs::cti_op_create_arguments_no_params);
else
- emitCTICall(Interpreter::cti_op_create_arguments);
+ emitCTICall(JITStubs::cti_op_create_arguments);
NEXT_OPCODE(op_create_arguments);
}
case op_convert_this: {
- emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- emitJumpSlowCaseIfNotJSCell(X86::eax);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::edx);
- addSlowCase(jnz32(Address(X86::edx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1);
+ addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
NEXT_OPCODE(op_convert_this);
}
case op_profile_will_call: {
- emitGetCTIParam(STUB_ARGS_profilerReference, X86::eax);
- Jump noProfiler = jzPtr(Address(X86::eax));
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::eax);
- emitCTICall(Interpreter::cti_op_profile_will_call);
+ emitGetCTIParam(STUB_ARGS_profilerReference, regT0);
+ Jump noProfiler = branchTestPtr(Zero, Address(regT0));
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT0);
+ emitCTICall(JITStubs::cti_op_profile_will_call);
noProfiler.link(this);
NEXT_OPCODE(op_profile_will_call);
}
case op_profile_did_call: {
- emitGetCTIParam(STUB_ARGS_profilerReference, X86::eax);
- Jump noProfiler = jzPtr(Address(X86::eax));
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::eax);
- emitCTICall(Interpreter::cti_op_profile_did_call);
+ emitGetCTIParam(STUB_ARGS_profilerReference, regT0);
+ Jump noProfiler = branchTestPtr(Zero, Address(regT0));
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT0);
+ emitCTICall(JITStubs::cti_op_profile_did_call);
noProfiler.link(this);
NEXT_OPCODE(op_profile_did_call);
@@ -1292,8 +1300,8 @@ void JIT::privateCompileSlowCases()
case op_convert_this: {
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_convert_this);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_convert_this);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_convert_this);
}
@@ -1304,7 +1312,7 @@ void JIT::privateCompileSlowCases()
case op_construct_verify: {
linkSlowCase(iter);
linkSlowCase(iter);
- emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_construct_verify);
@@ -1316,25 +1324,25 @@ void JIT::privateCompileSlowCases()
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- emitFastArithIntToImmNoCheck(X86::edx, X86::edx);
+ emitFastArithIntToImmNoCheck(regT1, regT1);
notImm.link(this);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_get_by_val);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_get_by_val);
emitPutVirtualRegister(currentInstruction[1].u.operand);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
// This is the slow case that handles accesses to arrays above the fast cut-off.
// First, check if this is an access to the vector
linkSlowCase(iter);
- jae32(X86::edx, Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow);
+ branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow);
// okay, missed the fast region, but it is still in the vector. Get the value.
- loadPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), X86::ecx);
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT2);
// Check whether the value loaded is zero; if so we need to return undefined.
- jzPtr(X86::ecx, beginGetByValSlow);
- move(X86::ecx, X86::eax);
- emitPutVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ branchTestPtr(Zero, regT2, beginGetByValSlow);
+ move(regT2, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
NEXT_OPCODE(op_get_by_val);
}
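// Aside: the above-cut-off path in plain C++ (simplified; ArrayStorage members
// as used in the loads above, noValue() is an illustrative stand-in):
static JSValuePtr getByValAboveFastCutOff(ArrayStorage* storage, unsigned i)
{
    if (i >= storage->m_vectorLength)
        return noValue();      // not in the vector at all: take the full slow case
    // In the vector but past the fast cut-off; a zero entry is a hole and must
    // also take the slow case, so it can be read back as undefined.
    return storage->m_vector[i];
}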
@@ -1355,17 +1363,17 @@ void JIT::privateCompileSlowCases()
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_loop_if_less);
- emitJumpSlowToHot(jnz32(X86::eax), target + 3);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
+ emitCTICall(JITStubs::cti_op_loop_if_less);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
} else {
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_loop_if_less);
- emitJumpSlowToHot(jnz32(X86::eax), target + 3);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_loop_if_less);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
}
NEXT_OPCODE(op_loop_if_less);
}
@@ -1382,17 +1390,17 @@ void JIT::privateCompileSlowCases()
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_loop_if_lesseq);
- emitJumpSlowToHot(jnz32(X86::eax), target + 3);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, regT2);
+ emitCTICall(JITStubs::cti_op_loop_if_lesseq);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
} else {
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_loop_if_lesseq);
- emitJumpSlowToHot(jnz32(X86::eax), target + 3);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_loop_if_lesseq);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
}
NEXT_OPCODE(op_loop_if_lesseq);
}
@@ -1405,32 +1413,32 @@ void JIT::privateCompileSlowCases()
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- emitFastArithIntToImmNoCheck(X86::edx, X86::edx);
+ emitFastArithIntToImmNoCheck(regT1, regT1);
notImm.link(this);
- emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitPutJITStubArg(X86::ecx, 3);
- emitCTICall(Interpreter::cti_op_put_by_val);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT2);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitPutJITStubArg(regT2, 3);
+ emitCTICall(JITStubs::cti_op_put_by_val);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
// slow cases for immediate int accesses to arrays
linkSlowCase(iter);
linkSlowCase(iter);
- emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitPutJITStubArg(X86::ecx, 3);
- emitCTICall(Interpreter::cti_op_put_by_val_array);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT2);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitPutJITStubArg(regT2, 3);
+ emitCTICall(JITStubs::cti_op_put_by_val_array);
NEXT_OPCODE(op_put_by_val);
}
case op_loop_if_true: {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_jtrue);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_jtrue);
unsigned target = currentInstruction[2].u.operand;
- emitJumpSlowToHot(jnz32(X86::eax), target + 2);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
NEXT_OPCODE(op_loop_if_true);
}
case op_pre_dec: {
@@ -1442,34 +1450,34 @@ void JIT::privateCompileSlowCases()
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_jless);
- emitJumpSlowToHot(jz32(X86::eax), target + 3);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, regT2);
+ emitCTICall(JITStubs::cti_op_jless);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
} else {
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_jless);
- emitJumpSlowToHot(jz32(X86::eax), target + 3);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_jless);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
}
NEXT_OPCODE(op_jnless);
}
case op_not: {
linkSlowCase(iter);
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), X86::eax);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_not);
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_not);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_not);
}
case op_jfalse: {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_jtrue);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_jtrue);
unsigned target = currentInstruction[2].u.operand;
- emitJumpSlowToHot(jz32(X86::eax), target + 2); // inverted!
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // inverted!
NEXT_OPCODE(op_jfalse);
}
case op_post_inc: {
@@ -1478,8 +1486,8 @@ void JIT::privateCompileSlowCases()
}
case op_bitnot: {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_bitnot);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_bitnot);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_bitnot);
}
@@ -1489,10 +1497,10 @@ void JIT::privateCompileSlowCases()
}
case op_jtrue: {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_jtrue);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_jtrue);
unsigned target = currentInstruction[2].u.operand;
- emitJumpSlowToHot(jnz32(X86::eax), target + 2);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
NEXT_OPCODE(op_jtrue);
}
case op_post_dec: {
@@ -1501,57 +1509,51 @@ void JIT::privateCompileSlowCases()
}
case op_bitxor: {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_bitxor);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_bitxor);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_bitxor);
}
case op_bitor: {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_bitor);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_bitor);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_bitor);
}
case op_eq: {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_eq);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_eq);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_eq);
}
case op_neq: {
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_neq);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_neq);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_neq);
}
case op_stricteq: {
linkSlowCase(iter);
linkSlowCase(iter);
-#if !USE(ALTERNATE_JSIMMEDIATE)
- linkSlowCase(iter);
-#endif
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_stricteq);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_stricteq);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_stricteq);
}
case op_nstricteq: {
linkSlowCase(iter);
linkSlowCase(iter);
-#if !USE(ALTERNATE_JSIMMEDIATE)
- linkSlowCase(iter);
-#endif
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- emitCTICall(Interpreter::cti_op_nstricteq);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 2);
+ emitCTICall(JITStubs::cti_op_nstricteq);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_nstricteq);
}
@@ -1559,10 +1561,10 @@ void JIT::privateCompileSlowCases()
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[4].u.operand, 3, X86::ecx);
- emitCTICall(Interpreter::cti_op_instanceof);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[4].u.operand, 3, regT2);
+ emitCTICall(JITStubs::cti_op_instanceof);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_instanceof);
}
@@ -1591,8 +1593,8 @@ void JIT::privateCompileSlowCases()
linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_to_jsnumber);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_to_jsnumber);
emitPutVirtualRegister(currentInstruction[1].u.operand);
NEXT_OPCODE(op_to_jsnumber);
@@ -1627,8 +1629,8 @@ void JIT::privateCompile()
#endif
// Could use a pop_m, but would need to offset the following instruction if so.
- pop(X86::ecx);
- emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
+ pop(regT2);
+ emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
Jump slowRegisterFileCheck;
Label afterRegisterFileCheck;
@@ -1636,10 +1638,10 @@ void JIT::privateCompile()
// In the case of a fast linked call, we do not set this up in the caller.
emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
- emitGetCTIParam(STUB_ARGS_registerFile, X86::eax);
- addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, X86::edx);
+ emitGetCTIParam(STUB_ARGS_registerFile, regT0);
+ addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
- slowRegisterFileCheck = jg32(X86::edx, Address(X86::eax, FIELD_OFFSET(RegisterFile, m_end)));
+ slowRegisterFileCheck = branch32(GreaterThan, regT1, Address(regT0, FIELD_OFFSET(RegisterFile, m_end)));
afterRegisterFileCheck = label();
}
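// Aside: the planted check, modelled as C++. Member names match the FIELD_OFFSET
// loads above; growRegisterFileOrThrow() is an illustrative stand-in for the
// cti_register_file_check stub. Per the codeType() test above, this is only
// emitted for FunctionCode:
static void registerFileCheck(Register* callFrame, CodeBlock* codeBlock, RegisterFile* registerFile)
{
    if (callFrame + codeBlock->m_numCalleeRegisters > registerFile->m_end)
        growRegisterFileOrThrow();
}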
@@ -1650,7 +1652,7 @@ void JIT::privateCompile()
if (m_codeBlock->codeType() == FunctionCode) {
slowRegisterFileCheck.link(this);
m_bytecodeIndex = 0; // emitCTICall will add to the map, but doesn't actually need this...
- emitCTICall(Interpreter::cti_register_file_check);
+ emitCTICall(JITStubs::cti_register_file_check);
#ifndef NDEBUG
// reset this, in order to guard its use with asserts
m_bytecodeIndex = (unsigned)-1;
@@ -1660,7 +1662,7 @@ void JIT::privateCompile()
ASSERT(m_jmpTable.isEmpty());
- RefPtr<ExecutablePool> allocator = m_globalData->poolForSize(m_assembler.size());
+ RefPtr<ExecutablePool> allocator = m_globalData->executableAllocator.poolForSize(m_assembler.size());
void* code = m_assembler.executableCopy(allocator.get());
JITCodeRef codeRef(code, allocator);
#ifndef NDEBUG
@@ -1678,28 +1680,28 @@ void JIT::privateCompile()
ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
- record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
- record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
}
} else {
ASSERT(record.type == SwitchRecord::String);
- record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
+ record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
unsigned offset = it->second.branchOffset;
- it->second.ctiOffset = offset ? patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
}
}
}
for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
- handler.nativeCode = patchBuffer.addressOf(m_labels[handler.target]);
+ handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
}
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -1708,61 +1710,53 @@ void JIT::privateCompile()
}
if (m_codeBlock->hasExceptionInfo()) {
- m_codeBlock->pcVector().reserveCapacity(m_calls.size());
+ m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
- m_codeBlock->pcVector().append(PC(reinterpret_cast<void**>(patchBuffer.addressOf(iter->from)) - reinterpret_cast<void**>(code), iter->bytecodeIndex));
+ m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeIndex(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeIndex));
}
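// Aside: each CallReturnOffsetToBytecodeIndex pairs the offset of a call's
// return address within this block's JIT code with the bytecode index that
// emitted it, so exception handling can map a native return address back to
// bytecode. A minimal lookup sketch (linear for clarity; field names assumed
// from the append above):
static unsigned bytecodeIndexForReturnOffset(const Vector<CallReturnOffsetToBytecodeIndex>& v, unsigned returnOffset)
{
    for (size_t i = 0; i < v.size(); ++i) {
        if (v[i].callReturnOffset == returnOffset)
            return v[i].bytecodeIndex;
    }
    return 0;   // not a recorded call site
}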
// Link absolute addresses for jsr
for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
- patchBuffer.setPtr(iter->storeLocation, patchBuffer.addressOf(iter->target));
+ patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).addressForJSR());
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- info.callReturnLocation = patchBuffer.addressOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
- info.hotPathBegin = patchBuffer.addressOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
-#else
- info.callReturnLocation = 0;
- info.hotPathBegin = 0;
-#endif
+ info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
}
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
-#if ENABLE(JIT_OPTIMIZE_CALL)
- info.callReturnLocation = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].callReturnLocation);
- info.hotPathBegin = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
- info.hotPathOther = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].hotPathOther);
- info.coldPathOther = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].coldPathOther);
-#else
- info.callReturnLocation = 0;
- info.hotPathBegin = 0;
- info.hotPathOther = 0;
- info.coldPathOther = 0;
-#endif
+ info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
+ info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+ info.coldPathOther = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].coldPathOther);
}
+#endif
m_codeBlock->setJITCode(codeRef);
}
-void JIT::privateCompileCTIMachineTrampolines()
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall)
{
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
// (1) The first function provides fast property access for array length
Label arrayLengthBegin = align();
// Check regT0 is an array
- Jump array_failureCases1 = emitJumpIfNotJSCell(X86::eax);
- Jump array_failureCases2 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr));
+ Jump array_failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump array_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
// Checks out okay! - get the length from the storage
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::eax);
- load32(Address(X86::eax, FIELD_OFFSET(ArrayStorage, m_length)), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT0);
+ load32(Address(regT0, FIELD_OFFSET(ArrayStorage, m_length)), regT0);
- Jump array_failureCases3 = ja32(X86::eax, Imm32(JSImmediate::maxImmediateInt));
+ Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
- // X86::eax contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ // regT0 contains a 64-bit value (is positive, is zero extended) so we don't need to sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
ret();
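// Aside: the fast path above as C++. The vptr compare stands in for a class
// check; isCell(), asCell(), slowCase() and jsImmediateNumber() are
// illustrative helpers, not the real API:
static JSValuePtr arrayLengthFastPath(JSValuePtr value, JSGlobalData* globalData)
{
    if (!isCell(value))                                              // array_failureCases1
        return slowCase();
    JSCell* cell = asCell(value);
    if (*reinterpret_cast<void**>(cell) != globalData->jsArrayVPtr)  // array_failureCases2
        return slowCase();
    unsigned length = static_cast<JSArray*>(cell)->m_storage->m_length;
    if (length > JSImmediate::maxImmediateInt)                       // array_failureCases3
        return slowCase();
    return jsImmediateNumber(length);                                // fits in an immediate int
}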
@@ -1770,159 +1764,175 @@ void JIT::privateCompileCTIMachineTrampolines()
Label stringLengthBegin = align();
// Check regT0 is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(X86::eax);
- Jump string_failureCases2 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsStringVptr));
+ Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the UString.
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), X86::eax);
- load32(Address(X86::eax, FIELD_OFFSET(UString::Rep, len)), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), regT0);
+ load32(Address(regT0, FIELD_OFFSET(UString::Rep, len)), regT0);
- Jump string_failureCases3 = ja32(X86::eax, Imm32(JSImmediate::maxImmediateInt));
+ Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
- // X86::eax contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ // regT0 contains a 64-bit value (is positive, is zero extended) so we don't need to sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
ret();
#endif
+#if !(PLATFORM(X86) || PLATFORM(X86_64))
+#error "This code is less portable than it looks this code assumes that regT3 is callee preserved, which happens to be true on x86/x86-64."
+#endif
+
// (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
Label virtualCallPreLinkBegin = align();
// Load the callee CodeBlock* into regT0
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax);
- loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax);
- Jump hasCodeBlock1 = jnzPtr(X86::eax);
- pop(X86::ebx);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0);
+ loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+ Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0);
+ pop(regT3);
restoreArgumentReference();
- Jump callJSFunction1 = call();
- emitGetJITStubArg(1, X86::ecx);
- emitGetJITStubArg(3, X86::edx);
- push(X86::ebx);
+ Call callJSFunction1 = call();
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ push(regT3);
hasCodeBlock1.link(this);
// Check argCount matches callee arity.
- Jump arityCheckOkay1 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx);
- pop(X86::ebx);
- emitPutJITStubArg(X86::ebx, 2);
- emitPutJITStubArg(X86::eax, 4);
+ Jump arityCheckOkay1 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
+ pop(regT3);
+ emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT0, 4);
restoreArgumentReference();
- Jump callArityCheck1 = call();
- move(X86::edx, callFrameRegister);
- emitGetJITStubArg(1, X86::ecx);
- emitGetJITStubArg(3, X86::edx);
- push(X86::ebx);
+ Call callArityCheck1 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ push(regT3);
arityCheckOkay1.link(this);
compileOpCallInitializeCallFrame();
- pop(X86::ebx);
- emitPutJITStubArg(X86::ebx, 2);
+ pop(regT3);
+ emitPutJITStubArg(regT3, 2);
restoreArgumentReference();
- Jump callDontLazyLinkCall = call();
- push(X86::ebx);
+ Call callDontLazyLinkCall = call();
+ push(regT3);
- jump(X86::eax);
+ jump(regT0);
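// Aside: the pre-link trampoline above, as pseudo-C++ (regT2 = callee
// JSFunction*, regT1 = argCount; compileCallee/fixupArity/dontLazyLinkCall are
// stand-ins for the cti_* stubs linked further down):
static void* virtualCallPreLink(JSFunction* callee, int argCount)
{
    CodeBlock* codeBlock = callee->m_body->m_code;   // 0 until first compiled
    if (!codeBlock)
        codeBlock = compileCallee();                 // cti_op_call_JSFunction
    if (codeBlock->m_numParameters != argCount)
        fixupArity();                                // cti_op_call_arityCheck
    initializeCallFrame();                           // compileOpCallInitializeCallFrame()
    return dontLazyLinkCall();                       // cti_vm_dontLazyLinkCall; jump here
}
// The link variant differs only in calling cti_vm_lazyLinkCall; the plain
// virtual call below jumps straight to codeBlock->m_jitCode.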
Label virtualCallLinkBegin = align();
// Load the callee CodeBlock* into regT0
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax);
- loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax);
- Jump hasCodeBlock2 = jnzPtr(X86::eax);
- pop(X86::ebx);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0);
+ loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+ Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
+ pop(regT3);
restoreArgumentReference();
- Jump callJSFunction2 = call();
- emitGetJITStubArg(1, X86::ecx);
- emitGetJITStubArg(3, X86::edx);
- push(X86::ebx);
+ Call callJSFunction2 = call();
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ push(regT3);
hasCodeBlock2.link(this);
// Check argCount matches callee arity.
- Jump arityCheckOkay2 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx);
- pop(X86::ebx);
- emitPutJITStubArg(X86::ebx, 2);
- emitPutJITStubArg(X86::eax, 4);
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
+ pop(regT3);
+ emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT0, 4);
restoreArgumentReference();
- Jump callArityCheck2 = call();
- move(X86::edx, callFrameRegister);
- emitGetJITStubArg(1, X86::ecx);
- emitGetJITStubArg(3, X86::edx);
- push(X86::ebx);
+ Call callArityCheck2 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ push(regT3);
arityCheckOkay2.link(this);
compileOpCallInitializeCallFrame();
- pop(X86::ebx);
- emitPutJITStubArg(X86::ebx, 2);
+ pop(regT3);
+ emitPutJITStubArg(regT3, 2);
restoreArgumentReference();
- Jump callLazyLinkCall = call();
- push(X86::ebx);
+ Call callLazyLinkCall = call();
+ push(regT3);
- jump(X86::eax);
+ jump(regT0);
Label virtualCallBegin = align();
// Load the callee CodeBlock* into regT0
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax);
- loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax);
- Jump hasCodeBlock3 = jnzPtr(X86::eax);
- pop(X86::ebx);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0);
+ loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+ Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
+ pop(regT3);
restoreArgumentReference();
- Jump callJSFunction3 = call();
- emitGetJITStubArg(1, X86::ecx);
- emitGetJITStubArg(3, X86::edx);
- push(X86::ebx);
+ Call callJSFunction3 = call();
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ push(regT3);
hasCodeBlock3.link(this);
// Check argCount matches callee arity.
- Jump arityCheckOkay3 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx);
- pop(X86::ebx);
- emitPutJITStubArg(X86::ebx, 2);
- emitPutJITStubArg(X86::eax, 4);
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
+ pop(regT3);
+ emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT0, 4);
restoreArgumentReference();
- Jump callArityCheck3 = call();
- move(X86::edx, callFrameRegister);
- emitGetJITStubArg(1, X86::ecx);
- emitGetJITStubArg(3, X86::edx);
- push(X86::ebx);
+ Call callArityCheck3 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ push(regT3);
arityCheckOkay3.link(this);
compileOpCallInitializeCallFrame();
// load ctiCode from the new codeBlock.
- loadPtr(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_jitCode)), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(CodeBlock, m_jitCode)), regT0);
- jump(X86::eax);
+ jump(regT0);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1);
+ Call array_failureCases2Call = makeTailRecursiveCall(array_failureCases2);
+ Call array_failureCases3Call = makeTailRecursiveCall(array_failureCases3);
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
// All trampolines constructed! Copy the code, link up calls, and hand the trampoline pointers back through the out-parameters.
- m_interpreter->m_executablePool = m_globalData->poolForSize(m_assembler.size());
- void* code = m_assembler.executableCopy(m_interpreter->m_executablePool.get());
- PatchBuffer patchBuffer(code);
+ *executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size());
+ void* code = m_assembler.executableCopy((*executablePool).get());
+ PatchBuffer patchBuffer(code);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(array_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
- patchBuffer.link(array_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
- patchBuffer.link(array_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
- patchBuffer.link(string_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
-
- m_interpreter->m_ctiArrayLengthTrampoline = patchBuffer.addressOf(arrayLengthBegin);
- m_interpreter->m_ctiStringLengthTrampoline = patchBuffer.addressOf(stringLengthBegin);
+ patchBuffer.link(array_failureCases1Call, JITStubs::cti_op_get_by_id_array_fail);
+ patchBuffer.link(array_failureCases2Call, JITStubs::cti_op_get_by_id_array_fail);
+ patchBuffer.link(array_failureCases3Call, JITStubs::cti_op_get_by_id_array_fail);
+ patchBuffer.link(string_failureCases1Call, JITStubs::cti_op_get_by_id_string_fail);
+ patchBuffer.link(string_failureCases2Call, JITStubs::cti_op_get_by_id_string_fail);
+ patchBuffer.link(string_failureCases3Call, JITStubs::cti_op_get_by_id_string_fail);
+
+ *ctiArrayLengthTrampoline = patchBuffer.trampolineAt(arrayLengthBegin);
+ *ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
+#else
+ UNUSED_PARAM(ctiArrayLengthTrampoline);
+ UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
- patchBuffer.link(callArityCheck1, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
- patchBuffer.link(callArityCheck2, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
- patchBuffer.link(callArityCheck3, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction1, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
- patchBuffer.link(callJSFunction2, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
- patchBuffer.link(callJSFunction3, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
- patchBuffer.link(callDontLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_dontLazyLinkCall));
- patchBuffer.link(callLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_lazyLinkCall));
-
- m_interpreter->m_ctiVirtualCallPreLink = patchBuffer.addressOf(virtualCallPreLinkBegin);
- m_interpreter->m_ctiVirtualCallLink = patchBuffer.addressOf(virtualCallLinkBegin);
- m_interpreter->m_ctiVirtualCall = patchBuffer.addressOf(virtualCallBegin);
+ patchBuffer.link(callArityCheck1, JITStubs::cti_op_call_arityCheck);
+ patchBuffer.link(callArityCheck2, JITStubs::cti_op_call_arityCheck);
+ patchBuffer.link(callArityCheck3, JITStubs::cti_op_call_arityCheck);
+ patchBuffer.link(callJSFunction1, JITStubs::cti_op_call_JSFunction);
+ patchBuffer.link(callJSFunction2, JITStubs::cti_op_call_JSFunction);
+ patchBuffer.link(callJSFunction3, JITStubs::cti_op_call_JSFunction);
+ patchBuffer.link(callDontLazyLinkCall, JITStubs::cti_vm_dontLazyLinkCall);
+ patchBuffer.link(callLazyLinkCall, JITStubs::cti_vm_lazyLinkCall);
+
+ *ctiVirtualCallPreLink = patchBuffer.trampolineAt(virtualCallPreLinkBegin);
+ *ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ *ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
}
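// Aside: with the trampoline globals moved off the Interpreter, the caller now
// provides the storage, roughly like this inside whatever initialization code
// owns them (sketch of the static wrapper's use; see compileCTIMachineTrampolines
// in JIT.h below):
RefPtr<ExecutablePool> pool;
void* arrayLength; void* stringLength;
void* callPreLink; void* callLink; void* call;
JIT::compileCTIMachineTrampolines(globalData, &pool, &arrayLength, &stringLength, &callPreLink, &callLink, &call);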
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index d13fbb5..25c7825 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -27,17 +27,19 @@
#define JIT_h
#include <wtf/Platform.h>
-#include <bytecode/SamplingTool.h>
#if ENABLE(JIT)
#define WTF_USE_CTI_REPATCH_PIC 1
#include "Interpreter.h"
+#include "JITCode.h"
+#include "JITStubs.h"
#include "Opcode.h"
#include "RegisterFile.h"
#include "MacroAssembler.h"
#include "Profiler.h"
+#include <bytecode/SamplingTool.h>
#include <wtf/AlwaysInline.h>
#include <wtf/Vector.h>
@@ -113,7 +115,7 @@ namespace JSC {
typedef VoidPtrPair (JIT_STUB *CTIHelper_2)(STUB_ARGS);
struct CallRecord {
- MacroAssembler::Jump from;
+ MacroAssembler::Call from;
unsigned bytecodeIndex;
void* to;
@@ -121,7 +123,7 @@ namespace JSC {
{
}
- CallRecord(MacroAssembler::Jump from, unsigned bytecodeIndex, void* to = 0)
+ CallRecord(MacroAssembler::Call from, unsigned bytecodeIndex, void* to = 0)
: from(from)
, bytecodeIndex(bytecodeIndex)
, to(to)
@@ -188,44 +190,73 @@ namespace JSC {
};
struct PropertyStubCompilationInfo {
- MacroAssembler::Jump callReturnLocation;
+ MacroAssembler::Call callReturnLocation;
MacroAssembler::Label hotPathBegin;
};
struct StructureStubCompilationInfo {
MacroAssembler::DataLabelPtr hotPathBegin;
- MacroAssembler::Jump hotPathOther;
- MacroAssembler::Jump callReturnLocation;
+ MacroAssembler::Call hotPathOther;
+ MacroAssembler::Call callReturnLocation;
MacroAssembler::Label coldPathOther;
};
extern "C" {
- JSValueEncodedAsPointer* ctiTrampoline(
-#if PLATFORM(X86_64)
- // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect).
- // We can allow register passing here, and move the writes of these values into the trampoline.
- void*, void*, void*, void*, void*, void*,
-#endif
- void* code, RegisterFile*, CallFrame*, JSValuePtr* exception, Profiler**, JSGlobalData*);
void ctiVMThrowTrampoline();
};
- void ctiSetReturnAddress(void** where, void* what);
- void ctiPatchCallByReturnAddress(void* where, void* what);
+ void ctiSetReturnAddress(void** addressOfReturnAddress, void* newDestinationToReturnTo);
+ void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction);
+ void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction);
class JIT : private MacroAssembler {
using MacroAssembler::Jump;
using MacroAssembler::JumpList;
using MacroAssembler::Label;
+ // NOTES:
+ //
+ // regT0 has two special meanings. The return value from a stub
+ // call will always be in regT0, and by default (unless
+ // a register is specified) emitPutVirtualRegister() will store
+ // the value from regT0.
+ //
+ // regT2 has no such dependencies. It is important that
+ // on x86/x86-64 it is ecx for performance reasons, since the
+ // MacroAssembler will need to plant register swaps if it is not -
+ // however the code will still function correctly.
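// For example, the common slow-case shape relies on both facts at once
// (illustrative pairing, as seen throughout privateCompileSlowCases()):
//
//     emitCTICall(JITStubs::cti_op_eq);    // stub result arrives in regT0
//     emitPutVirtualRegister(dst);         // stores regT0 by default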
#if PLATFORM(X86_64)
+ static const RegisterID returnValueRegister = X86::eax;
+ static const RegisterID cachedResultRegister = X86::eax;
+ static const RegisterID firstArgumentRegister = X86::edi;
+
static const RegisterID timeoutCheckRegister = X86::r12;
static const RegisterID callFrameRegister = X86::r13;
static const RegisterID tagTypeNumberRegister = X86::r14;
static const RegisterID tagMaskRegister = X86::r15;
-#else
+
+ static const RegisterID regT0 = X86::eax;
+ static const RegisterID regT1 = X86::edx;
+ static const RegisterID regT2 = X86::ecx;
+ // NOTE: privateCompileCTIMachineTrampolines() relies on this being callee preserved; this should be considered non-interface.
+ static const RegisterID regT3 = X86::ebx;
+#elif PLATFORM(X86)
+ static const RegisterID returnValueRegister = X86::eax;
+ static const RegisterID cachedResultRegister = X86::eax;
+ // On x86 we always use fastcall conventions, but on
+ // OS X it might make more sense to just use regparm.
+ static const RegisterID firstArgumentRegister = X86::ecx;
+
static const RegisterID timeoutCheckRegister = X86::esi;
static const RegisterID callFrameRegister = X86::edi;
+
+ static const RegisterID regT0 = X86::eax;
+ static const RegisterID regT1 = X86::edx;
+ static const RegisterID regT2 = X86::ecx;
+ // NOTE: privateCompileCTIMachineTrampolines() relies on this being callee preserved; this should be considered non-interface.
+ static const RegisterID regT3 = X86::ebx;
+#else
+ #error "JIT not supported on this platform."
#endif
static const int patchGetByIdDefaultStructure = -1;
@@ -255,9 +286,9 @@ namespace JSC {
static const int patchOffsetGetByIdPropertyMapOffset = 31;
static const int patchOffsetGetByIdPutResult = 31;
#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 53 + ctiArgumentInitSize;
+ static const int patchOffsetGetByIdSlowCaseCall = 61 + ctiArgumentInitSize;
#else
- static const int patchOffsetGetByIdSlowCaseCall = 30 + ctiArgumentInitSize;
+ static const int patchOffsetGetByIdSlowCaseCall = 38 + ctiArgumentInitSize;
#endif
static const int patchOffsetOpCallCompareToJump = 9;
#else
@@ -284,13 +315,13 @@ namespace JSC {
jit.privateCompile();
}
- static void compileGetByIdSelf(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+ static void compileGetByIdSelf(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompileGetByIdSelf(stubInfo, structure, cachedOffset, returnAddress);
}
- static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress)
+ static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
@@ -314,51 +345,43 @@ namespace JSC {
}
#endif
- static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
+ static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
}
- static void compilePutByIdReplace(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+ static void compilePutByIdReplace(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompilePutByIdReplace(stubInfo, structure, cachedOffset, returnAddress);
}
- static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
+ static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
}
- static void compileCTIMachineTrampolines(JSGlobalData* globalData)
+ static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall)
{
JIT jit(globalData);
- jit.privateCompileCTIMachineTrampolines();
+ jit.privateCompileCTIMachineTrampolines(executablePool, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall);
}
- static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress);
- static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress);
+ static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
+ static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
- static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, void* returnAddress)
+ static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ProcessorReturnAddress returnAddress)
{
JIT jit(globalData, codeBlock);
return jit.privateCompilePatchGetArrayLength(returnAddress);
}
- static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount);
+ static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount);
static void unlinkCall(CallLinkInfo*);
- inline static JSValuePtr execute(void* code, RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValuePtr* exception)
- {
- return JSValuePtr::decode(ctiTrampoline(
-#if PLATFORM(X86_64)
- 0, 0, 0, 0, 0, 0,
-#endif
- code, registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
- }
-
private:
JIT(JSGlobalData*, CodeBlock* = 0);
@@ -366,19 +389,19 @@ namespace JSC {
void privateCompileLinkPass();
void privateCompileSlowCases();
void privateCompile();
- void privateCompileGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress);
- void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame);
+ void privateCompileGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
#if USE(CTI_REPATCH_PIC)
void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
#endif
- void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame);
- void privateCompilePutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress);
- void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, void* returnAddress);
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
+ void privateCompilePutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ProcessorReturnAddress returnAddress);
- void privateCompileCTIMachineTrampolines();
- void privateCompilePatchGetArrayLength(void* returnAddress);
+ void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall);
+ void privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress);
void addSlowCase(Jump);
void addJump(Jump, int);
@@ -396,7 +419,6 @@ namespace JSC {
void compileOpConstructSetupArgs(Instruction*);
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
- void putDoubleResultToJSNumberCellOrJSImmediate(X86Assembler::XMMRegisterID xmmSource, RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86Assembler::XMMRegisterID tempXmm, RegisterID tempReg1, RegisterID tempReg2);
void compileFastArith_op_add(Instruction*);
void compileFastArith_op_sub(Instruction*);
@@ -427,7 +449,7 @@ namespace JSC {
void emitGetVirtualRegister(int src, RegisterID dst);
void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
- void emitPutVirtualRegister(unsigned dst, RegisterID from = X86::eax);
+ void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
@@ -458,6 +480,16 @@ namespace JSC {
#if USE(ALTERNATE_JSIMMEDIATE)
JIT::Jump emitJumpIfImmediateNumber(RegisterID);
JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
+#else
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfImmediateInteger(reg);
+ }
+
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfNotImmediateInteger(reg);
+ }
#endif
Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
@@ -492,21 +524,20 @@ namespace JSC {
void restoreArgumentReference();
void restoreArgumentReferenceForTrampoline();
- Jump emitNakedCall(RegisterID);
- Jump emitNakedCall(void* function);
- Jump emitCTICall_internal(void*);
- Jump emitCTICall(CTIHelper_j helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Jump emitCTICall(CTIHelper_o helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Jump emitCTICall(CTIHelper_p helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Jump emitCTICall(CTIHelper_v helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Jump emitCTICall(CTIHelper_s helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Jump emitCTICall(CTIHelper_b helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Jump emitCTICall(CTIHelper_2 helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Call emitNakedCall(void* function);
+ Call emitCTICall_internal(void*);
+ Call emitCTICall(CTIHelper_j helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Call emitCTICall(CTIHelper_o helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Call emitCTICall(CTIHelper_p helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Call emitCTICall(CTIHelper_v helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Call emitCTICall(CTIHelper_s helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Call emitCTICall(CTIHelper_b helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Call emitCTICall(CTIHelper_2 helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
- void emitSlowScriptCheck();
+ void emitTimeoutCheck();
#ifndef NDEBUG
void printBytecodeOperandTypes(unsigned src1, unsigned src2);
#endif
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index 0a3e9ab..8fe245e 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -48,23 +48,23 @@ namespace JSC {
void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
{
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
// FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
- emitFastArithImmToInt(X86::eax);
- emitFastArithImmToInt(X86::ecx);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
// Mask with 0x1f as per ecma-262 11.7.2 step 7.
// On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
- and32(Imm32(0x1f), X86::ecx);
+ and32(Imm32(0x1f), regT2);
#endif
- lshift32(X86::ecx, X86::eax);
+ lshift32(regT2, regT0);
#if !USE(ALTERNATE_JSIMMEDIATE)
- addSlowCase(joAdd32(X86::eax, X86::eax));
- signExtend32ToPtr(X86::eax, X86::eax);
+ addSlowCase(branchAdd32(Overflow, regT0, regT0));
+ signExtend32ToPtr(regT0, regT0);
#endif
- emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(result);
}
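// Aside: the 0x1f mask implements ECMA-262's "shiftCount & 0x1f" semantics,
// which x86's shift instructions happen to apply for free. In C++ terms:
static int32_t jsLeftShift(int32_t value, uint32_t shiftCount)
{
    return value << (shiftCount & 0x1f);   // so (1 << 33) === 2 in JS
}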
void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
@@ -79,47 +79,47 @@ void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned
Jump notImm1 = getSlowCase(iter);
Jump notImm2 = getSlowCase(iter);
linkSlowCase(iter);
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
notImm1.link(this);
notImm2.link(this);
#endif
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::ecx, 2);
- emitCTICall(Interpreter::cti_op_lshift);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT2, 2);
+ emitCTICall(JITStubs::cti_op_lshift);
emitPutVirtualRegister(result);
}
void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
{
if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
// Mask with 0x1f as per ecma-262 11.7.2 step 7.
#if USE(ALTERNATE_JSIMMEDIATE)
- rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), X86::eax);
+ rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
- rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), X86::eax);
+ rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#endif
} else {
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
- emitFastArithImmToInt(X86::ecx);
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
// Mask with 0x1f as per ecma-262 11.7.2 step 7.
// On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
- and32(Imm32(0x1f), X86::ecx);
+ and32(Imm32(0x1f), regT2);
#endif
#if USE(ALTERNATE_JSIMMEDIATE)
- rshift32(X86::ecx, X86::eax);
+ rshift32(regT2, regT0);
#else
- rshiftPtr(X86::ecx, X86::eax);
+ rshiftPtr(regT2, regT0);
#endif
}
#if USE(ALTERNATE_JSIMMEDIATE)
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
#else
- orPtr(Imm32(JSImmediate::TagTypeNumber), X86::eax);
+ orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
emitPutVirtualRegister(result);
}
@@ -127,45 +127,45 @@ void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned, unsigned op2
{
linkSlowCase(iter);
if (isOperandConstantImmediateInt(op2))
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
else {
linkSlowCase(iter);
- emitPutJITStubArg(X86::ecx, 2);
+ emitPutJITStubArg(regT2, 2);
}
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_rshift);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_rshift);
emitPutVirtualRegister(result);
}
void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
{
if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
int32_t imm = getConstantOperandImmediateInt(op1);
- andPtr(Imm32(imm), X86::eax);
+ andPtr(Imm32(imm), regT0);
if (imm >= 0)
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
#else
- andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), X86::eax);
+ andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
} else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
int32_t imm = getConstantOperandImmediateInt(op2);
- andPtr(Imm32(imm), X86::eax);
+ andPtr(Imm32(imm), regT0);
if (imm >= 0)
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
#else
- andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), X86::eax);
+ andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
} else {
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
- andPtr(X86::edx, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ andPtr(regT1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
}
emitPutVirtualRegister(result);
}
@@ -173,31 +173,34 @@ void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned
{
linkSlowCase(iter);
if (isOperandConstantImmediateInt(op1)) {
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArg(X86::eax, 2);
+ emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
+ emitPutJITStubArg(regT0, 2);
} else if (isOperandConstantImmediateInt(op2)) {
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
} else {
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArg(X86::edx, 2);
+ emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
+ emitPutJITStubArg(regT1, 2);
}
- emitCTICall(Interpreter::cti_op_bitand);
+ emitCTICall(JITStubs::cti_op_bitand);
emitPutVirtualRegister(result);
}
+#if PLATFORM(X86) || PLATFORM(X86_64)
void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
{
emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
#if USE(ALTERNATE_JSIMMEDIATE)
- addSlowCase(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(js0()))));
- mod32(X86::ecx, X86::eax, X86::edx);
+ addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValuePtr::encode(js0()))));
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86::ecx);
#else
emitFastArithDeTagImmediate(X86::eax);
addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
- mod32(X86::ecx, X86::eax, X86::edx);
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86::ecx);
signExtend32ToPtr(X86::edx, X86::edx);
#endif
emitFastArithReTagImmediate(X86::edx, X86::eax);
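// Aside: mod32() becomes an explicit cdq/idivl pair because x86 division is
// asymmetric: idivl divides the 64-bit value in edx:eax by its operand, leaving
// the quotient in eax and the remainder in edx, and cdq sign-extends eax into
// edx to set that up. In effect the fast path computes:
static int32_t jsModFastPath(int32_t dividend, int32_t divisor)
{
    // divisor == 0 is diverted to the slow case before idivl runs
    return dividend % divisor;   // remainder lands in edx after idivl
}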
@@ -220,70 +223,83 @@ void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vecto
#endif
emitPutJITStubArg(X86::eax, 1);
emitPutJITStubArg(X86::ecx, 2);
- emitCTICall(Interpreter::cti_op_mod);
+ emitCTICall(JITStubs::cti_op_mod);
+ emitPutVirtualRegister(result);
+}
+#else
+void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
+{
+ emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
+ emitCTICall(JITStubs::cti_op_mod);
emitPutVirtualRegister(result);
}
+void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+#endif
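
The cdq/idivl pair emitted in the x86 branch above leans on x86 division semantics: cdq sign-extends eax into edx:eax, and idivl leaves the quotient in eax and the remainder in edx, which is why the result is re-tagged out of edx. An x86-only sketch of the same computation (GCC/Clang inline asm assumed; the zero-divisor guard corresponds to the addSlowCase above):

#include <cstdint>

static inline int32_t x86Mod(int32_t dividend, int32_t divisor)
{
    int32_t quotient;
    int32_t remainder;
    __asm__("cdq\n\t"           // sign-extend eax into edx:eax
            "idivl %[divisor]"  // quotient -> eax, remainder -> edx
            : "=a"(quotient), "=&d"(remainder)
            : "0"(dividend), [divisor] "r"(divisor)
            : "cc");
    return remainder; // caller must exclude divisor == 0 (and INT_MIN / -1)
}
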
void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
{
- emitGetVirtualRegister(srcDst, X86::eax);
- move(X86::eax, X86::edx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(srcDst, regT0);
+ move(regT0, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
- addSlowCase(joAdd32(Imm32(1), X86::edx));
- emitFastArithIntToImmNoCheck(X86::edx, X86::edx);
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT1);
#else
- addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx));
- signExtend32ToPtr(X86::edx, X86::edx);
+ addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
+ signExtend32ToPtr(regT1, regT1);
#endif
- emitPutVirtualRegister(srcDst, X86::edx);
+ emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
}
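
The branchAdd32(Overflow, ...) form above replaces the old jump-on-overflow mnemonics (joAdd32 and friends) with an explicit condition argument. The fast-path/slow-path split it implements can be sketched in C++ with the compiler overflow builtins (GCC/Clang builtin availability is an assumption):

#include <cstdint>

// Post-increment fast path: succeed only when the +1 does not overflow;
// otherwise bail to the slow case (cti_op_post_inc), mirroring
// addSlowCase(branchAdd32(Overflow, Imm32(1), regT1)).
static inline bool tryFastPostInc(int32_t& slot, int32_t& oldValue)
{
    oldValue = slot; // post-inc hands back the original value
    int32_t incremented;
    if (__builtin_add_overflow(slot, 1, &incremented))
        return false;
    slot = incremented;
    return true;
}
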
void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_post_inc);
- emitPutVirtualRegister(srcDst, X86::edx);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_post_inc);
+ emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
}
void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
{
- emitGetVirtualRegister(srcDst, X86::eax);
- move(X86::eax, X86::edx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(srcDst, regT0);
+ move(regT0, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
- addSlowCase(joSub32(Imm32(1), X86::edx));
- emitFastArithIntToImmNoCheck(X86::edx, X86::edx);
+ addSlowCase(branchSub32(Zero, Imm32(1), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT1);
#else
- addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx));
- signExtend32ToPtr(X86::edx, X86::edx);
+ addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
+ signExtend32ToPtr(regT1, regT1);
#endif
- emitPutVirtualRegister(srcDst, X86::edx);
+ emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_post_dec);
- emitPutVirtualRegister(srcDst, X86::edx);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_post_dec);
+ emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
}
void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
{
- emitGetVirtualRegister(srcDst, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(srcDst, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
- addSlowCase(joAdd32(Imm32(1), X86::eax));
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
#else
- addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax));
- signExtend32ToPtr(X86::eax, X86::eax);
+ addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
+ signExtend32ToPtr(regT0, regT0);
#endif
emitPutVirtualRegister(srcDst);
}
@@ -291,23 +307,23 @@ void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>
{
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
- emitGetVirtualRegister(srcDst, X86::eax);
+ emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_pre_inc);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_pre_inc);
emitPutVirtualRegister(srcDst);
}
void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
{
- emitGetVirtualRegister(srcDst, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitGetVirtualRegister(srcDst, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
- addSlowCase(joSub32(Imm32(1), X86::eax));
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ addSlowCase(branchSub32(Zero, Imm32(1), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
#else
- addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax));
- signExtend32ToPtr(X86::eax, X86::eax);
+ addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
+ signExtend32ToPtr(regT0, regT0);
#endif
emitPutVirtualRegister(srcDst);
}
@@ -315,10 +331,10 @@ void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>
{
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
- emitGetVirtualRegister(srcDst, X86::eax);
+ emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- emitPutJITStubArg(X86::eax, 1);
- emitCTICall(Interpreter::cti_op_pre_dec);
+ emitPutJITStubArg(regT0, 1);
+ emitCTICall(JITStubs::cti_op_pre_dec);
emitPutVirtualRegister(srcDst);
}
@@ -331,9 +347,9 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_add);
+ emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
+ emitCTICall(JITStubs::cti_op_add);
emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
@@ -347,9 +363,9 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_mul);
+ emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
+ emitCTICall(JITStubs::cti_op_mul);
emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
@@ -363,9 +379,9 @@ void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_sub);
+ emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
+ emitCTICall(JITStubs::cti_op_sub);
emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
@@ -381,13 +397,13 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsign
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
if (opcodeID == op_add)
- addSlowCase(joAdd32(X86::edx, X86::eax));
+ addSlowCase(branchAdd32(Overflow, X86::edx, X86::eax));
else if (opcodeID == op_sub)
- addSlowCase(joSub32(X86::edx, X86::eax));
+ addSlowCase(branchSub32(Overflow, X86::edx, X86::eax));
else {
ASSERT(opcodeID == op_mul);
- addSlowCase(joMul32(X86::edx, X86::eax));
- addSlowCase(jz32(X86::eax));
+ addSlowCase(branchMul32(Overflow, X86::edx, X86::eax));
+ addSlowCase(branchTest32(Zero, X86::eax));
}
emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
}
@@ -409,12 +425,12 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitPutJITStubArg(X86::eax, 1);
emitPutJITStubArg(X86::edx, 2);
if (opcodeID == op_add)
- emitCTICall(Interpreter::cti_op_add);
+ emitCTICall(JITStubs::cti_op_add);
else if (opcodeID == op_sub)
- emitCTICall(Interpreter::cti_op_sub);
+ emitCTICall(JITStubs::cti_op_sub);
else {
ASSERT(opcodeID == op_mul);
- emitCTICall(Interpreter::cti_op_mul);
+ emitCTICall(JITStubs::cti_op_mul);
}
Jump end = jump();
@@ -464,7 +480,7 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_add);
+ emitCTICall(JITStubs::cti_op_add);
emitPutVirtualRegister(result);
return;
}
@@ -472,12 +488,12 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
} else if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
} else
compileBinaryArithOp(op_add, result, op1, op2, types);
@@ -496,13 +512,13 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl
linkSlowCase(iter);
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_add);
+ emitCTICall(JITStubs::cti_op_add);
} else if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_add);
+ emitCTICall(JITStubs::cti_op_add);
} else
compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
@@ -521,12 +537,12 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
emitGetVirtualRegister(op2, X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
+ addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
emitFastArithReTagImmediate(X86::eax, X86::eax);
} else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
emitGetVirtualRegister(op1, X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
+ addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
emitFastArithReTagImmediate(X86::eax, X86::eax);
} else
compileBinaryArithOp(op_mul, result, op1, op2, types);
@@ -547,7 +563,7 @@ void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<Sl
// There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_mul);
+ emitCTICall(JITStubs::cti_op_mul);
} else
compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
@@ -605,9 +621,19 @@ static bool isSSE2Present()
cpuid;
mov flags, edx;
}
+#elif COMPILER(GCC)
+ asm (
+ "movl $0x1, %%eax;"
+ "pushl %%ebx;"
+ "cpuid;"
+ "popl %%ebx;"
+ "movl %%edx, %0;"
+ : "=g" (flags)
+ :
+ : "%eax", "%ecx", "%edx"
+ );
#else
flags = 0;
- // FIXME: Add GCC code to do above asm
#endif
present = (flags & SSE2FeatureBit) != 0;
}
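
The new GCC branch above fills in the previously missing cpuid probe; the push/pop of %ebx is needed because cpuid clobbers ebx, which is reserved as the PIC register on x86. On GCC-compatible compilers the same test can also be written with the <cpuid.h> helper. An alternative sketch, not what the patch uses:

#include <cpuid.h>

static bool isSSE2PresentViaHelper()
{
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) // CPUID leaf 1: feature flags
        return false;
    return (edx & (1u << 26)) != 0;              // EDX bit 26 is the SSE2 bit
}
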
@@ -619,53 +645,11 @@ static bool isSSE2Present()
#endif
-/*
- This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
-
- In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
- is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
-
- However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
- control will fall through from the code planted.
-*/
-void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
-{
- // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
- __ cvttsd2si_rr(xmmSource, tempReg1);
- __ addl_rr(tempReg1, tempReg1);
- __ sarl_i8r(1, tempReg1);
- __ cvtsi2sd_rr(tempReg1, tempXmm);
- // Compare & branch if immediate.
- __ ucomisd_rr(tempXmm, xmmSource);
- JmpSrc resultIsImm = __ je();
- JmpDst resultLookedLikeImmButActuallyIsnt = __ label();
-
- // Store the result to the JSNumberCell and jump.
- __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
- if (jsNumberCell != X86::eax)
- __ movl_rr(jsNumberCell, X86::eax);
- emitPutVirtualRegister(dst);
- *wroteJSNumberCell = __ jmp();
-
- __ link(resultIsImm, __ label());
- // value == (double)(JSImmediate)value... or at least, it looks that way...
- // ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered).
- __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
- __ pextrw_irr(3, xmmSource, tempReg2);
- __ cmpl_ir(0x8000, tempReg2);
- __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
- // Yes it really really really is representable as a JSImmediate.
- emitFastArithIntToImmNoCheck(tempReg1, X86::eax);
- emitPutVirtualRegister(dst);
-}
-
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
Structure* numberStructure = m_globalData->numberStructure.get();
JmpSrc wasJSNumberCell1;
- JmpSrc wasJSNumberCell1b;
JmpSrc wasJSNumberCell2;
- JmpSrc wasJSNumberCell2b;
emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);
@@ -695,11 +679,11 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
__ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
JmpSrc loadedDouble = __ jmp();
// (1b) if we get here, src1 is an immediate
- __ link(op1imm, __ label());
+ __ linkJump(op1imm, __ label());
emitFastArithImmToInt(X86::eax);
__ cvtsi2sd_rr(X86::eax, X86::xmm0);
// (1c)
- __ link(loadedDouble, __ label());
+ __ linkJump(loadedDouble, __ label());
if (opcodeID == op_add)
__ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
else if (opcodeID == op_sub)
@@ -709,12 +693,15 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
__ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
}
- putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
- wasJSNumberCell2b = __ jmp();
+ // Store the result to the JSNumberCell and jump.
+ __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::edx);
+ __ movl_rr(X86::edx, X86::eax);
+ emitPutVirtualRegister(dst);
+ wasJSNumberCell2 = __ jmp();
// (2) This handles cases where src2 is an immediate number.
// Two slow cases - either src1 isn't an immediate, or the subtract overflows.
- __ link(op2imm, __ label());
+ __ linkJump(op2imm, __ label());
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
} else if (types.first().isReusable() && isSSE2Present()) {
ASSERT(types.first().mightBeNumber());
@@ -742,11 +729,11 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
__ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
JmpSrc loadedDouble = __ jmp();
// (1b) if we get here, src2 is an immediate
- __ link(op2imm, __ label());
+ __ linkJump(op2imm, __ label());
emitFastArithImmToInt(X86::edx);
__ cvtsi2sd_rr(X86::edx, X86::xmm1);
// (1c)
- __ link(loadedDouble, __ label());
+ __ linkJump(loadedDouble, __ label());
__ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
if (opcodeID == op_add)
__ addsd_rr(X86::xmm1, X86::xmm0);
@@ -759,12 +746,14 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
__ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
emitPutVirtualRegister(dst);
- putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
- wasJSNumberCell1b = __ jmp();
+ // Store the result to the JSNumberCell and jump.
+ __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
+ emitPutVirtualRegister(dst);
+ wasJSNumberCell1 = __ jmp();
// (2) This handles cases where src1 is an immediate number.
// Two slow cases - either src2 isn't an immediate, or the subtract overflows.
- __ link(op1imm, __ label());
+ __ linkJump(op1imm, __ label());
emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
} else
emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
@@ -782,17 +771,17 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
ASSERT(opcodeID == op_mul);
// convert eax & edx from JSImmediates to ints, and check if either are zero
emitFastArithImmToInt(X86::edx);
- JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
+ Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
__ testl_rr(X86::edx, X86::edx);
JmpSrc op2NonZero = __ jne();
- __ link(op1Zero, __ label());
+ op1Zero.link(this);
// if either input is zero, add the two together, and check if the result is < 0.
// If it is, we have a problem: for N < 0, (N * 0) == -0, which is not representable as a JSImmediate.
__ movl_rr(X86::eax, X86::ecx);
__ addl_rr(X86::edx, X86::ecx);
addSlowCase(__ js());
// Skip the above check if neither input is zero
- __ link(op2NonZero, __ label());
+ __ linkJump(op2NonZero, __ label());
__ imull_rr(X86::edx, X86::eax);
addSlowCase(__ jo());
signExtend32ToPtr(X86::eax, X86::eax);
@@ -801,12 +790,10 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
emitPutVirtualRegister(dst);
if (types.second().isReusable() && isSSE2Present()) {
- __ link(wasJSNumberCell2, __ label());
- __ link(wasJSNumberCell2b, __ label());
+ __ linkJump(wasJSNumberCell2, __ label());
}
else if (types.first().isReusable() && isSSE2Present()) {
- __ link(wasJSNumberCell1, __ label());
- __ link(wasJSNumberCell1b, __ label());
+ __ linkJump(wasJSNumberCell1, __ label());
}
}
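
The zero test in the multiply path above exists because IEEE arithmetic distinguishes -0 from +0: when one operand is zero and the other negative, the mathematically zero product is -0, which a tagged integer cannot represent. A sketch of the same guard (overflow builtin availability assumed):

#include <cstdint>

// Mirrors the emitted check: when either operand is zero, a + b < 0 exposes a
// negative partner, i.e. a -0 product that must take the double slow path.
static inline bool tryFastMul(int32_t a, int32_t b, int32_t& product)
{
    if ((a == 0 || b == 0) && a + b < 0)
        return false; // result would be -0
    return !__builtin_mul_overflow(a, b, &product);
}
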
@@ -841,12 +828,12 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
if (opcodeID == op_add)
- emitCTICall(Interpreter::cti_op_add);
+ emitCTICall(JITStubs::cti_op_add);
else if (opcodeID == op_sub)
- emitCTICall(Interpreter::cti_op_sub);
+ emitCTICall(JITStubs::cti_op_sub);
else {
ASSERT(opcodeID == op_mul);
- emitCTICall(Interpreter::cti_op_mul);
+ emitCTICall(JITStubs::cti_op_mul);
}
emitPutVirtualRegister(dst);
}
@@ -860,13 +847,13 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
signExtend32ToPtr(X86::eax, X86::eax);
emitPutVirtualRegister(result);
} else if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
signExtend32ToPtr(X86::eax, X86::eax);
emitPutVirtualRegister(result);
} else {
@@ -876,7 +863,7 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
else {
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_add);
+ emitCTICall(JITStubs::cti_op_add);
emitPutVirtualRegister(result);
}
}
@@ -894,7 +881,7 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl
notImm.link(this);
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
emitPutJITStubArg(X86::eax, 2);
- emitCTICall(Interpreter::cti_op_add);
+ emitCTICall(JITStubs::cti_op_add);
emitPutVirtualRegister(result);
} else if (isOperandConstantImmediateInt(op2)) {
Jump notImm = getSlowCase(iter);
@@ -903,7 +890,7 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl
notImm.link(this);
emitPutJITStubArg(X86::eax, 1);
emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_add);
+ emitCTICall(JITStubs::cti_op_add);
emitPutVirtualRegister(result);
} else {
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
@@ -924,7 +911,7 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
emitGetVirtualRegister(op2, X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
emitFastArithDeTagImmediate(X86::eax);
- addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
+ addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
signExtend32ToPtr(X86::eax, X86::eax);
emitFastArithReTagImmediate(X86::eax, X86::eax);
emitPutVirtualRegister(result);
@@ -932,7 +919,7 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
emitGetVirtualRegister(op1, X86::eax);
emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
emitFastArithDeTagImmediate(X86::eax);
- addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
+ addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
signExtend32ToPtr(X86::eax, X86::eax);
emitFastArithReTagImmediate(X86::eax, X86::eax);
emitPutVirtualRegister(result);
@@ -952,7 +939,7 @@ void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<Sl
// There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(Interpreter::cti_op_mul);
+ emitCTICall(JITStubs::cti_op_mul);
emitPutVirtualRegister(result);
} else
compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
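
The detag/retag dance in the constant-multiply path above works because of the payload-shifted encoding: detagging (2n + 1 -> 2n) before multiplying by the raw constant c yields 2nc, and retagging restores 2nc + 1, the tagged form of n * c. Note the surrounding code only takes this path for positive constants, sidestepping the -0 case. A sketch under the same assumed (n << 1) | 1 encoding:

#include <cstdint>

static inline bool tryFastMulByConstant(intptr_t tagged, int32_t c, intptr_t& taggedResult)
{
    int32_t detagged = static_cast<int32_t>(tagged - 1);   // 2n+1 -> 2n
    int32_t product;
    if (__builtin_mul_overflow(detagged, c, &product))     // 2n * c == 2(n*c)
        return false;                                      // slow case
    taggedResult = static_cast<intptr_t>(product) | 1;     // retag: 2(n*c)+1
    return true;
}
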
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
index af26712..62c7149 100644
--- a/JavaScriptCore/jit/JITCall.cpp
+++ b/JavaScriptCore/jit/JITCall.cpp
@@ -49,10 +49,11 @@ void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
// When the JSFunction is deleted, the pointer embedded in the instruction stream will no longer be valid
// (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
// match). Reset the check so it no longer matches.
- DataLabelPtr::patch(callLinkInfo->hotPathBegin, JSValuePtr::encode(jsImpossibleValue()));
+ callLinkInfo->hotPathBegin.repatch(JSValuePtr::encode(jsImpossibleValue()));
}
-void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
+void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
{
// Currently we only link calls with the exact number of arguments.
if (callerArgCount == calleeCodeBlock->m_numParameters) {
@@ -60,24 +61,23 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode
calleeCodeBlock->addCaller(callLinkInfo);
- DataLabelPtr::patch(callLinkInfo->hotPathBegin, callee);
- Jump::patch(callLinkInfo->hotPathOther, ctiCode);
+ callLinkInfo->hotPathBegin.repatch(callee);
+ callLinkInfo->hotPathOther.relink(ctiCode.addressForCall());
}
// patch the instruction that jumps out to the cold path, so that we only try to link once.
- void* patchCheck = reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(callLinkInfo->hotPathBegin) + patchOffsetOpCallCompareToJump);
- Jump::patch(patchCheck, callLinkInfo->coldPathOther);
+ callLinkInfo->hotPathBegin.jumpAtOffset(patchOffsetOpCallCompareToJump).relink(callLinkInfo->coldPathOther);
}
void JIT::compileOpCallInitializeCallFrame()
{
- store32(X86::edx, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+ store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
- storePtr(X86::ecx, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
- storePtr(X86::edx, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
+ storePtr(regT2, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
+ storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
}
void JIT::compileOpCallSetupArgs(Instruction* instruction)
@@ -86,7 +86,7 @@ void JIT::compileOpCallSetupArgs(Instruction* instruction)
int registerOffset = instruction[4].u.operand;
// ecx holds func
- emitPutJITStubArg(X86::ecx, 1);
+ emitPutJITStubArg(regT2, 1);
emitPutJITStubArgConstant(registerOffset, 2);
emitPutJITStubArgConstant(argCount, 3);
}
@@ -97,7 +97,7 @@ void JIT::compileOpCallEvalSetupArgs(Instruction* instruction)
int registerOffset = instruction[4].u.operand;
// ecx holds func
- emitPutJITStubArg(X86::ecx, 1);
+ emitPutJITStubArg(regT2, 1);
emitPutJITStubArgConstant(registerOffset, 2);
emitPutJITStubArgConstant(argCount, 3);
}
@@ -110,10 +110,10 @@ void JIT::compileOpConstructSetupArgs(Instruction* instruction)
int thisRegister = instruction[6].u.operand;
// ecx holds func
- emitPutJITStubArg(X86::ecx, 1);
+ emitPutJITStubArg(regT2, 1);
emitPutJITStubArgConstant(registerOffset, 2);
emitPutJITStubArgConstant(argCount, 3);
- emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
+ emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
emitPutJITStubArgConstant(thisRegister, 5);
}
@@ -129,14 +129,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
// Handle eval
Jump wasEval;
if (opcodeID == op_call_eval) {
- emitGetVirtualRegister(callee, X86::ecx);
+ emitGetVirtualRegister(callee, regT2);
compileOpCallEvalSetupArgs(instruction);
- emitCTICall(Interpreter::cti_op_call_eval);
- wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
+ emitCTICall(JITStubs::cti_op_call_eval);
+ wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
}
- emitGetVirtualRegister(callee, X86::ecx);
+ emitGetVirtualRegister(callee, regT2);
// The arguments have been set up on the hot path for op_call_eval
if (opcodeID == op_call)
compileOpCallSetupArgs(instruction);
@@ -144,22 +144,22 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
compileOpConstructSetupArgs(instruction);
// Check for JSFunctions.
- emitJumpSlowCaseIfNotJSCell(X86::ecx);
- addSlowCase(jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr)));
+ emitJumpSlowCaseIfNotJSCell(regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- emitCTICall(Interpreter::cti_op_construct_JSConstruct);
+ emitCTICall(JITStubs::cti_op_construct_JSConstruct);
emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, X86::ecx);
+ emitGetVirtualRegister(callee, regT2);
}
// Speculatively roll the callframe, assuming argCount will match the arity.
storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), X86::edx);
+ move(Imm32(argCount), regT1);
- emitNakedCall(m_interpreter->m_ctiVirtualCall);
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
if (opcodeID == op_call_eval)
wasEval.link(this);
@@ -178,7 +178,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
linkSlowCase(iter);
// This handles host functions
- emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
+ emitCTICall(((opcodeID == op_construct) ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction));
// Put the return value in dst. In the interpreter, op_ret does this.
emitPutVirtualRegister(dst);
@@ -187,12 +187,6 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
#else
-static NO_RETURN void unreachable()
-{
- ASSERT_NOT_REACHED();
- exit(1);
-}
-
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
int dst = instruction[1].u.operand;
@@ -203,18 +197,18 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Handle eval
Jump wasEval;
if (opcodeID == op_call_eval) {
- emitGetVirtualRegister(callee, X86::ecx);
+ emitGetVirtualRegister(callee, regT2);
compileOpCallEvalSetupArgs(instruction);
- emitCTICall(Interpreter::cti_op_call_eval);
- wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
+ emitCTICall(JITStubs::cti_op_call_eval);
+ wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
}
// This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
// This deliberately leaves the callee in ecx, used when setting up the stack frame below
- emitGetVirtualRegister(callee, X86::ecx);
+ emitGetVirtualRegister(callee, regT2);
DataLabelPtr addressOfLinkedFunctionCheck;
- Jump jumpToSlow = jnePtrWithPatch(X86::ecx, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT2, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
addSlowCase(jumpToSlow);
ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
@@ -226,25 +220,25 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
int proto = instruction[5].u.operand;
int thisRegister = instruction[6].u.operand;
- emitPutJITStubArg(X86::ecx, 1);
- emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
- emitCTICall(Interpreter::cti_op_construct_JSConstruct);
+ emitPutJITStubArg(regT2, 1);
+ emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
+ emitCTICall(JITStubs::cti_op_construct_JSConstruct);
emitPutVirtualRegister(thisRegister);
- emitGetVirtualRegister(callee, X86::ecx);
+ emitGetVirtualRegister(callee, regT2);
}
// Fast version of stack frame initialization, directly relative to edi.
// Note that this omits setting up RegisterFile::CodeBlock, which is set in the callee
storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
- storePtr(X86::ecx, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain
+ storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
- storePtr(X86::edx, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+ storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
// Call to the callee
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(0));
if (opcodeID == op_call_eval)
wasEval.link(this);
@@ -271,24 +265,24 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
compileOpConstructSetupArgs(instruction);
// Fast check for JS function.
- Jump callLinkFailNotObject = emitJumpIfNotJSCell(X86::ecx);
- Jump callLinkFailNotJSFunction = jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));
+ Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT2);
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr));
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- emitCTICall(Interpreter::cti_op_construct_JSConstruct);
+ emitCTICall(JITStubs::cti_op_construct_JSConstruct);
emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, X86::ecx);
+ emitGetVirtualRegister(callee, regT2);
}
- move(Imm32(argCount), X86::edx);
+ move(Imm32(argCount), regT1);
// Speculatively roll the callframe, assuming argCount will match the arity.
storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
- emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCallPreLink());
Jump storeResultForFirstRun = jump();
@@ -303,14 +297,14 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
compileOpConstructSetupArgs(instruction);
// Check for JSFunctions.
- Jump isNotObject = emitJumpIfNotJSCell(X86::ecx);
- Jump isJSFunction = jePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));
+ Jump isNotObject = emitJumpIfNotJSCell(regT2);
+ Jump isJSFunction = branchPtr(Equal, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr));
// This handles host functions
isNotObject.link(this);
callLinkFailNotObject.link(this);
callLinkFailNotJSFunction.link(this);
- emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
+ emitCTICall(((opcodeID == op_construct) ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction));
Jump wasNotJSFunction = jump();
// Next, handle JSFunctions...
@@ -318,17 +312,17 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- emitCTICall(Interpreter::cti_op_construct_JSConstruct);
+ emitCTICall(JITStubs::cti_op_construct_JSConstruct);
emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, X86::ecx);
+ emitGetVirtualRegister(callee, regT2);
}
// Speculatively roll the callframe, assuming argCount will match the arity.
storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), X86::edx);
+ move(Imm32(argCount), regT1);
- emitNakedCall(m_interpreter->m_ctiVirtualCall);
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
// Put the return value in dst. In the interpreter, op_ret does this.
wasNotJSFunction.link(this);
diff --git a/JavaScriptCore/jit/JITCode.h b/JavaScriptCore/jit/JITCode.h
new file mode 100644
index 0000000..0490d0e
--- /dev/null
+++ b/JavaScriptCore/jit/JITCode.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITCode_h
+#define JITCode_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "CallFrame.h"
+#include "JSValue.h"
+#include "Profiler.h"
+
+namespace JSC {
+
+ class JSGlobalData;
+ class RegisterFile;
+
+ extern "C" {
+ JSValueEncodedAsPointer* ctiTrampoline(
+#if PLATFORM(X86_64)
+ // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect).
+ // We can allow register passing here, and move the writes of these values into the trampoline.
+ void*, void*, void*, void*, void*, void*,
+#endif
+ void* code, RegisterFile*, CallFrame*, JSValuePtr* exception, Profiler**, JSGlobalData*);
+ };
+
+ class JITCode {
+ public:
+ JITCode(void* code)
+ : code(code)
+ {
+ }
+
+ operator bool()
+ {
+ return code != 0;
+ }
+
+ void* addressForCall()
+ {
+ return code;
+ }
+
+ // This function returns the offset in bytes of 'pointerIntoCode' into
+ // this block of code. The pointer provided must be a pointer into this
+ // block of code. It is ASSERTed that no code block is >4GB in size.
+ unsigned offsetOf(void* pointerIntoCode)
+ {
+ intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(code);
+ ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
+ return static_cast<unsigned>(result);
+ }
+
+ // Execute the code!
+ inline JSValuePtr execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValuePtr* exception)
+ {
+ return JSValuePtr::decode(ctiTrampoline(
+#if PLATFORM(X86_64)
+ 0, 0, 0, 0, 0, 0,
+#endif
+ code, registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ }
+
+ private:
+ void* code;
+ };
+
+};
+
+#endif
+
+#endif
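
JITCode is a thin value wrapper around the raw entry pointer, replacing the bare void* previously threaded through linkCall and friends. A hypothetical call-site sketch; entryPoint, registerFile, callFrame, globalData and returnAddressInsideBlock stand in for surrounding JSC objects and are not part of the patch:

JITCode jitCode(entryPoint);            // wrap the generated code pointer
if (jitCode) {                          // operator bool() checks code != 0
    JSValuePtr exception = noValue();   // noValue() assumed from JSValue.h
    JSValuePtr result = jitCode.execute(&registerFile, callFrame, &globalData, &exception);
    // offsetOf() maps a pointer inside the block to a byte offset, e.g. for
    // translating a return address into patch bookkeeping:
    unsigned offset = jitCode.offsetOf(returnAddressInsideBlock);
}
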
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 7a97cd8..684c404 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -69,8 +69,8 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
if (!atJumpTarget) {
// The argument we want is already stored in eax
- if (dst != X86::eax)
- move(X86::eax, dst);
+ if (dst != cachedResultRegister)
+ move(cachedResultRegister, dst);
killLastResultRegister();
return;
}
@@ -177,7 +177,7 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeader
ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
- m_lastResultBytecodeRegister = (from == X86::eax) ? dst : std::numeric_limits<int>::max();
+ m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
// FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}
@@ -187,20 +187,11 @@ ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
// FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}
-ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(X86::RegisterID r)
+ALWAYS_INLINE JIT::Call JIT::emitNakedCall(void* function)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
- Jump nakedCall = call(r);
- m_calls.append(CallRecord(nakedCall, m_bytecodeIndex));
- return nakedCall;
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(void* function)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
- Jump nakedCall = call();
+ Call nakedCall = nearCall();
m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function));
return nakedCall;
}
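
emitNakedCall now plants a near call and defers binding the target via the m_calls list; emitCTICall_internal below does the same for stub calls. A sketch of the record being appended (the field list is an assumption inferred from the constructor calls visible here):

// Assumed shape of the bookkeeping entry; the linker later walks the list and
// binds each planted call to its function.
struct CallRecord {
    void* from;             // stands in for the planted Call handle
    unsigned bytecodeIndex; // maps the call site back to its bytecode
    void* to;               // helper to bind at link time
};
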
@@ -208,25 +199,21 @@ ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(void* function)
#if USE(JIT_STUB_ARGUMENT_REGISTER)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
-#if PLATFORM(X86_64)
- move(X86::esp, X86::edi);
-#else
- move(X86::esp, X86::ecx);
-#endif
+ move(stackPointerRegister, firstArgumentRegister);
emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
// In the trampoline on x86-64, the first argument register is not overwritten.
#if !PLATFORM(X86_64)
- move(X86::esp, X86::ecx);
- addPtr(Imm32(sizeof(void*)), X86::ecx);
+ move(stackPointerRegister, firstArgumentRegister);
+ addPtr(Imm32(sizeof(void*)), firstArgumentRegister);
#endif
}
#elif USE(JIT_STUB_ARGUMENT_STACK)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
- storePtr(X86::esp, X86::esp);
+ poke(stackPointerRegister);
emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
@@ -238,7 +225,7 @@ ALWAYS_INLINE void JIT::restoreArgumentReference()
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#endif
-ALWAYS_INLINE JIT::Jump JIT::emitCTICall_internal(void* helper)
+ALWAYS_INLINE JIT::Call JIT::emitCTICall_internal(void* helper)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
@@ -246,7 +233,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitCTICall_internal(void* helper)
sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, true);
#endif
restoreArgumentReference();
- Jump ctiCall = call();
+ Call ctiCall = call();
m_calls.append(CallRecord(ctiCall, m_bytecodeIndex, helper));
#if ENABLE(OPCODE_SAMPLING)
sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, false);
@@ -258,15 +245,15 @@ ALWAYS_INLINE JIT::Jump JIT::emitCTICall_internal(void* helper)
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
- return jnePtr(Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure));
+ return branchPtr(NotEqual, Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
- return jzPtr(reg, tagMaskRegister);
+ return branchTestPtr(Zero, reg, tagMaskRegister);
#else
- return jz32(reg, Imm32(JSImmediate::TagMask));
+ return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}
@@ -285,9 +272,9 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
- return jnzPtr(reg, tagMaskRegister);
+ return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
- return jnz32(reg, Imm32(JSImmediate::TagMask));
+ return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}
@@ -311,29 +298,29 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
#if USE(ALTERNATE_JSIMMEDIATE)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
- return jnzPtr(reg, tagTypeNumberRegister);
+ return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
- return jzPtr(reg, tagTypeNumberRegister);
+ return branchTestPtr(Zero, reg, tagTypeNumberRegister);
}
#endif
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
- return jaePtr(reg, tagTypeNumberRegister);
+ return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
- return jnz32(reg, Imm32(JSImmediate::TagTypeNumber));
+ return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(ALTERNATE_JSIMMEDIATE)
- return jbPtr(reg, tagTypeNumberRegister);
+ return branchPtr(Below, reg, tagTypeNumberRegister);
#else
- return jz32(reg, Imm32(JSImmediate::TagTypeNumber));
+ return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}
@@ -362,7 +349,7 @@ ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
- return jzSubPtr(Imm32(JSImmediate::TagTypeNumber), reg);
+ return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif
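
Under ALTERNATE_JSIMMEDIATE the rewritten checks above compare whole registers against tag constants pre-loaded into tagTypeNumberRegister and tagMaskRegister. A sketch of the encoding those branch conditions imply; the constant values are assumptions consistent with the conditions, not values stated in the patch:

#include <cstdint>

static const uint64_t TagTypeNumber = 0xffff000000000000ull; // assumed
static const uint64_t TagMask       = TagTypeNumber | 0x2;   // assumed

static inline bool isInt32(uint64_t v)  { return v >= TagTypeNumber; }        // branchPtr(AboveOrEqual, reg, tagTypeNumberRegister)
static inline bool isNumber(uint64_t v) { return (v & TagTypeNumber) != 0; }  // branchTestPtr(NonZero, reg, tagTypeNumberRegister)
static inline bool isCell(uint64_t v)   { return (v & TagMask) == 0; }        // branchTestPtr(Zero, reg, tagMaskRegister)
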
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index 6740bec..ce90ee4 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -53,11 +53,11 @@ void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident,
// to array-length / prototype access trampolines, and finally we also plant the property-map access offset as a label
// to jump back to if one of these trampolines finds a match.
- emitGetVirtualRegister(baseVReg, X86::eax);
+ emitGetVirtualRegister(baseVReg, regT0);
- emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(regT0, 1);
emitPutJITStubArgConstant(ident, 2);
- emitCTICall(Interpreter::cti_op_get_by_id_generic);
+ emitCTICall(JITStubs::cti_op_get_by_id_generic);
emitPutVirtualRegister(resultVReg);
}
@@ -73,12 +73,12 @@ void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg,
// to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
// such that the Structure & offset are always at the same distance from this.
- emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);
+ emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 3);
- emitCTICall(Interpreter::cti_op_put_by_id_generic);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 3);
+ emitCTICall(JITStubs::cti_op_put_by_id_generic);
}
void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned)
@@ -95,21 +95,21 @@ void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsig
// to array-length / prototype access trampolines, and finally we also plant the property-map access offset as a label
// to jump back to if one of these trampolines finds a match.
- emitGetVirtualRegister(baseVReg, X86::eax);
+ emitGetVirtualRegister(baseVReg, regT0);
- emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
DataLabelPtr structureToCompare;
- Jump structureCheck = jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(X86::eax, patchGetByIdDefaultOffset), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+ DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
Label putResult(this);
@@ -132,9 +132,9 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
#ifndef NDEBUG
Label coldPathBegin(this);
#endif
- emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(regT0, 1);
emitPutJITStubArgConstant(ident, 2);
- Jump call = emitCTICall(Interpreter::cti_op_get_by_id);
+ Call call = emitCTICall(JITStubs::cti_op_get_by_id);
emitPutVirtualRegister(resultVReg);
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
@@ -149,22 +149,22 @@ void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsign
// to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
// such that the Structure & offset are always at the same distance from this.
- emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);
+ emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
// Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
- emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
- addSlowCase(jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
// Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(X86::edx, Address(X86::eax, patchGetByIdDefaultOffset));
+ loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+ DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
}
@@ -174,9 +174,9 @@ void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<Sl
linkSlowCase(iter);
emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 3);
- Jump call = emitCTICall(Interpreter::cti_op_put_by_id);
+ emitPutJITStubArg(regT0, 1);
+ emitPutJITStubArg(regT1, 3);
+ Call call = emitCTICall(JITStubs::cti_op_put_by_id);
// Track the location of the call; this will be used to recover patch information.
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
@@ -193,54 +193,55 @@ static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Str
return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
{
JumpList failureCases;
// Check eax is an object of the right Structure.
- failureCases.append(emitJumpIfNotJSCell(X86::eax));
- failureCases.append(jnePtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
+ failureCases.append(emitJumpIfNotJSCell(regT0));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
JumpList successCases;
// ecx = baseObject
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
// proto(ecx) = baseObject->structure()->prototype()
- failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+ failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
- loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+ loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
// ecx = baseObject->m_structure
for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
// null check the prototype
- successCases.append(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(jsNull()))));
+ successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValuePtr::encode(jsNull()))));
// Check the structure id
- failureCases.append(jnePtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));
+ failureCases.append(branchPtr(NotEqual, Address(regT2, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
- loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
}
successCases.link(this);
- Jump callTarget;
+ Call callTarget;
// emit a call only if storage realloc is needed
- if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
+ bool willNeedStorageRealloc = transitionWillNeedStorageRealloc(oldStructure, newStructure);
+ if (willNeedStorageRealloc) {
pop(X86::ebx);
#if PLATFORM(X86_64)
- move(Imm32(newStructure->propertyStorageCapacity()), X86::edx);
+ move(Imm32(newStructure->propertyStorageCapacity()), regT1);
move(Imm32(oldStructure->propertyStorageCapacity()), X86::esi);
- move(X86::eax, X86::edi);
+ move(regT0, X86::edi);
callTarget = call();
#else
push(Imm32(newStructure->propertyStorageCapacity()));
push(Imm32(oldStructure->propertyStorageCapacity()));
- push(X86::eax);
+ push(regT0);
callTarget = call();
addPtr(Imm32(3 * sizeof(void*)), X86::esp);
#endif
- emitGetJITStubArg(3, X86::edx);
+ emitGetJITStubArg(3, regT1);
push(X86::ebx);
}
@@ -248,150 +249,144 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// codeblock should ensure oldStructure->m_refCount > 0
sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)));
+ storePtr(ImmPtr(newStructure), Address(regT0, FIELD_OFFSET(JSCell, m_structure)));
// write the value
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr)));
+ loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+ storePtr(regT1, Address(regT0, cachedOffset * sizeof(JSValuePtr)));
ret();
- Jump failureJump;
- bool plantedFailureJump = false;
- if (!failureCases.empty()) {
- failureCases.link(this);
- restoreArgumentReferenceForTrampoline();
- failureJump = jump();
- plantedFailureJump = true;
- }
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
PatchBuffer patchBuffer(code);
- if (plantedFailureJump)
- patchBuffer.link(failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+ patchBuffer.link(failureCall, JITStubs::cti_op_put_by_id_fail);
- if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
- patchBuffer.link(callTarget, reinterpret_cast<void*>(resizePropertyStorage));
-
- stubInfo->stubRoutine = code;
+ if (willNeedStorageRealloc)
+ patchBuffer.link(callTarget, resizePropertyStorage);
- Jump::patch(returnAddress, code);
+ stubInfo->stubRoutine = patchBuffer.entry();
+
+ returnAddress.relinkCallerToFunction(code);
}
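
A minimal standalone C++ sketch of the transition stub's runtime behavior may help here. The types are illustrative stand-ins, not JSC's real classes; the prototype-chain walk, the refcount adjustments, and the resizePropertyStorage call are elided, and returning false stands in for the tail call to cti_op_put_by_id_fail:

    #include <cstddef>

    struct Structure;
    struct ObjectModel {
        Structure* structure;       // models JSCell::m_structure
        void** propertyStorage;     // models JSObject::m_propertyStorage
    };

    bool putByIdTransition(ObjectModel* base, Structure* oldStructure,
                           Structure* newStructure, std::size_t cachedOffset,
                           void* value)
    {
        if (base->structure != oldStructure)
            return false;                            // the failureCases path
        base->structure = newStructure;              // commit the transition
        base->propertyStorage[cachedOffset] = value; // write the value
        return true;
    }
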
-void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+ // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_self_fail);
// Patch the offset into the property map to load from, then patch the Structure to look for.
- void* structureAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdStructure);
- void* displacementAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPropertyMapOffset);
- DataLabelPtr::patch(structureAddress, structure);
- DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr));
+ stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure).repatch(structure);
+ stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(cachedOffset * sizeof(JSValuePtr));
}
-void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));
+ // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ returnAddress.relinkCallerToFunction(JITStubs::cti_op_put_by_id_generic);
// Patch the offset into the property map to load from, then patch the Structure to look for.
- void* structureAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdStructure;
- void* displacementAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdPropertyMapOffset;
- DataLabelPtr::patch(structureAddress, structure);
- DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr));
+ stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure).repatch(structure);
+ stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(cachedOffset * sizeof(JSValuePtr));
}
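
The two patch functions above fill in a monomorphic inline cache. Below is a minimal sketch of that cache's shape, assuming stand-in types; expectedStructure and cachedOffset model the embedded DataLabelPtr/DataLabel32 constants that repatch() overwrites:

    #include <cstddef>

    struct Structure;
    struct ObjectModel { Structure* structure; void** propertyStorage; };

    struct GetByIdCache {
        Structure* expectedStructure; // overwritten via dataLabelPtrAtOffset(...)
        std::size_t cachedOffset;     // overwritten via dataLabel32AtOffset(...)
    };

    void* getByIdFastPath(ObjectModel* base, const GetByIdCache& cache,
                          void* (*slowCase)(ObjectModel*))
    {
        if (base->structure != cache.expectedStructure)
            return slowCase(base);    // the branch the stubs relink
        return base->propertyStorage[cache.cachedOffset];
    }
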
-void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
+void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress)
{
StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+ returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_array_fail);
// Check eax is an array
- Jump failureCases1 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr));
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
// Checks out okay! - get the length from the storage
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
- load32(Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_length)), X86::ecx);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
+ load32(Address(regT2, FIELD_OFFSET(ArrayStorage, m_length)), regT2);
- Jump failureCases2 = ja32(X86::ecx, Imm32(JSImmediate::maxImmediateInt));
+ Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
- emitFastArithIntToImmNoCheck(X86::ecx, X86::eax);
+ emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
PatchBuffer patchBuffer(code);
// Use the patch information to link the failure cases back to the original slow case routine.
- void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
patchBuffer.link(failureCases1, slowCaseBegin);
patchBuffer.link(failureCases2, slowCaseBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- void* hotPathPutResult = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, hotPathPutResult);
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = code;
+ CodeLocationLabel entryLabel = patchBuffer.entry();
+ stubInfo->stubRoutine = entryLabel;
- // Finally patch the jump to sow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ jumpLocation.relink(entryLabel);
}
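
A rough C++ equivalent of the array-length stub compiled above, assuming stand-in types; the real stub jumps back into the hot path rather than returning, and the vptr compare models the branchPtr against jsArrayVPtr:

    #include <cstdint>

    struct ArrayStorageModel { std::uint32_t m_length; };
    struct ArrayModel {
        void* vptr;                  // first word, matching the Address(regT0) check
        ArrayStorageModel* m_storage;
    };

    bool getArrayLengthFastPath(ArrayModel* base, void* jsArrayVPtr,
                                std::int32_t maxImmediateInt, std::int32_t& result)
    {
        if (base->vptr != jsArrayVPtr)
            return false;            // failureCases1: not a JSArray
        std::uint32_t length = base->m_storage->m_length;
        if (length > static_cast<std::uint32_t>(maxImmediateInt))
            return false;            // failureCases2: will not fit an immediate int
        result = static_cast<std::int32_t>(length); // emitFastArithIntToImmNoCheck
        return true;
    }
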
-void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
// Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
- Jump failureCases2 = checkStructure(X86::eax, structure);
+ Jump failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump failureCases2 = checkStructure(regT0, structure);
// Checks out okay! - getDirectOffset
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+ loadPtr(Address(regT0, cachedOffset * sizeof(JSValuePtr)), regT0);
ret();
+ Call failureCases1Call = makeTailRecursiveCall(failureCases1);
+ Call failureCases2Call = makeTailRecursiveCall(failureCases2);
+
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
PatchBuffer patchBuffer(code);
- patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
- patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+ patchBuffer.link(failureCases1Call, JITStubs::cti_op_get_by_id_self_fail);
+ patchBuffer.link(failureCases2Call, JITStubs::cti_op_get_by_id_self_fail);
- stubInfo->stubRoutine = code;
+ stubInfo->stubRoutine = patchBuffer.entry();
- Jump::patch(returnAddress, code);
+ returnAddress.relinkCallerToFunction(code);
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
+ returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(static_cast<void*>(protoPropertyStorage), X86::edx);
+ loadPtr(static_cast<void*>(protoPropertyStorage), regT1);
// Check eax is an object of the right Structure.
- Jump failureCases1 = checkStructure(X86::eax, structure);
+ Jump failureCases1 = checkStructure(regT0, structure);
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
- move(ImmPtr(prototypeStructure), X86::ebx);
- Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress));
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
- Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif
// Checks out okay! - getDirectOffset
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
Jump success = jump();
@@ -399,59 +394,59 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
PatchBuffer patchBuffer(code);
// Use the patch information to link the failure cases back to the original slow case routine.
- void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
patchBuffer.link(failureCases1, slowCaseBegin);
patchBuffer.link(failureCases2, slowCaseBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = code;
+ CodeLocationLabel entryLabel = patchBuffer.entry();
+ stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ jumpLocation.relink(entryLabel);
#else
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
+ loadPtr(protoPropertyStorage, regT1);
// Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
- Jump failureCases2 = checkStructure(X86::eax, structure);
+ Jump failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump failureCases2 = checkStructure(regT0, structure);
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
- Jump failureCases3 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+ Jump failureCases3 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
// Checks out okay! - getDirectOffset
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
ret();
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
PatchBuffer patchBuffer(code);
- patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
- patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
- patchBuffer.link(failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+ patchBuffer.link(failureCases1, JITStubs::cti_op_get_by_id_proto_fail);
+ patchBuffer.link(failureCases2, JITStubs::cti_op_get_by_id_proto_fail);
+ patchBuffer.link(failureCases3, JITStubs::cti_op_get_by_id_proto_fail);
- stubInfo->stubRoutine = code;
+ stubInfo->stubRoutine = patchBuffer.entry();
- Jump::patch(returnAddress, code);
+ returnAddress.relinkCallerToFunction(code);
#endif
}
#if USE(CTI_REPATCH_PIC)
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
- Jump failureCase = checkStructure(X86::eax, structure);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ Jump failureCase = checkStructure(regT0, structure);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+ loadPtr(Address(regT0, cachedOffset * sizeof(JSValuePtr)), regT0);
Jump success = jump();
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
@@ -459,22 +454,23 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
PatchBuffer patchBuffer(code);
// Use the patch information to link the failure cases back to the original slow case routine.
- void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
if (!lastProtoBegin)
- lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
patchBuffer.link(failureCase, lastProtoBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.entry();
structure->ref();
- polymorphicStructures->list[currentIndex].set(code, structure);
+ polymorphicStructures->list[currentIndex].set(entryLabel, structure);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ jumpLocation.relink(entryLabel);
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
@@ -483,22 +479,22 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
+ loadPtr(protoPropertyStorage, regT1);
// Check eax is an object of the right Structure.
- Jump failureCases1 = checkStructure(X86::eax, structure);
+ Jump failureCases1 = checkStructure(regT0, structure);
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
- move(ImmPtr(prototypeStructure), X86::ebx);
- Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress));
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
- Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif
// Checks out okay! - getDirectOffset
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
Jump success = jump();
@@ -506,21 +502,22 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
PatchBuffer patchBuffer(code);
// Use the patch information to link the failure cases back to the original slow case routine.
- void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
patchBuffer.link(failureCases1, lastProtoBegin);
patchBuffer.link(failureCases2, lastProtoBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.entry();
structure->ref();
prototypeStructure->ref();
- prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure);
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ jumpLocation.relink(entryLabel);
}
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
@@ -530,7 +527,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
JumpList bucketsOfFail;
// Check eax is an object of the right Structure.
- Jump baseObjectCheck = checkStructure(X86::eax, structure);
+ Jump baseObjectCheck = checkStructure(regT0, structure);
bucketsOfFail.append(baseObjectCheck);
Structure* currStructure = structure;
@@ -543,54 +540,55 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
- move(ImmPtr(currStructure), X86::ebx);
- bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
- bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
}
ASSERT(protoObject);
PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ loadPtr(protoPropertyStorage, regT1);
+ loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
Jump success = jump();
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
PatchBuffer patchBuffer(code);
// Use the patch information to link the failure cases back to the original slow case routine.
- void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
patchBuffer.link(bucketsOfFail, lastProtoBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.entry();
// Track the stub we have created so that it will be deleted later.
structure->ref();
chain->ref();
- prototypeStructures->list[currentIndex].set(code, structure, chain);
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ jumpLocation.relink(entryLabel);
}
#endif
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
+ returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);
ASSERT(count);
JumpList bucketsOfFail;
// Check eax is an object of the right Structure.
- bucketsOfFail.append(checkStructure(X86::eax, structure));
+ bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
RefPtr<Structure>* chainEntries = chain->head();
@@ -602,45 +600,43 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
- move(ImmPtr(currStructure), X86::ebx);
- bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
- bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
}
ASSERT(protoObject);
PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ loadPtr(protoPropertyStorage, regT1);
+ loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
Jump success = jump();
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
PatchBuffer patchBuffer(code);
// Use the patch information to link the failure cases back to the original slow case routine.
- void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
-
- patchBuffer.link(bucketsOfFail, slowCaseBegin);
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = code;
+ CodeLocationLabel entryLabel = patchBuffer.entry();
+ stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ jumpLocation.relink(entryLabel);
#else
ASSERT(count);
JumpList bucketsOfFail;
// Check eax is an object of the right Structure.
- bucketsOfFail.append(emitJumpIfNotJSCell(X86::eax));
- bucketsOfFail.append(checkStructure(X86::eax, structure));
+ bucketsOfFail.append(emitJumpIfNotJSCell(regT0));
+ bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
RefPtr<Structure>* chainEntries = chain->head();
@@ -652,49 +648,52 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
- move(ImmPtr(currStructure), X86::ebx);
- bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, regT3, AbsoluteAddress(prototypeStructureAddress)));
#else
- bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
}
ASSERT(protoObject);
PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ loadPtr(protoPropertyStorage, regT1);
+ loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
ret();
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- patchBuffer.link(bucketsOfFail, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+ patchBuffer.link(bucketsOfFail, JITStubs::cti_op_get_by_id_proto_fail);
- stubInfo->stubRoutine = code;
+ stubInfo->stubRoutine = patchBuffer.entry();
- Jump::patch(returnAddress, code);
+ returnAddress.relinkCallerToFunction(code);
#endif
}
-void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
// Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
- Jump failureCases2 = checkStructure(X86::eax, structure);
+ Jump failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump failureCases2 = checkStructure(regT0, structure);
// checks out okay! - putDirectOffset
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr)));
+ loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+ storePtr(regT1, Address(regT0, cachedOffset * sizeof(JSValuePtr)));
ret();
+ Call failureCases1Call = makeTailRecursiveCall(failureCases1);
+ Call failureCases2Call = makeTailRecursiveCall(failureCases2);
+
void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
PatchBuffer patchBuffer(code);
- patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
- patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+ patchBuffer.link(failureCases1Call, JITStubs::cti_op_put_by_id_fail);
+ patchBuffer.link(failureCases2Call, JITStubs::cti_op_put_by_id_fail);
- stubInfo->stubRoutine = code;
+ stubInfo->stubRoutine = patchBuffer.entry();
- Jump::patch(returnAddress, code);
+ returnAddress.relinkCallerToFunction(code);
}
#endif
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
new file mode 100644
index 0000000..de528a5
--- /dev/null
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -0,0 +1,2196 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITStubs.h"
+
+#if ENABLE(JIT)
+
+#include "Arguments.h"
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "Collector.h"
+#include "Debugger.h"
+#include "ExceptionHelpers.h"
+#include "GlobalEvalFunction.h"
+#include "JIT.h"
+#include "JSActivation.h"
+#include "JSArray.h"
+#include "JSByteArray.h"
+#include "JSFunction.h"
+#include "JSNotAnObject.h"
+#include "JSPropertyNameIterator.h"
+#include "JSStaticScopeObject.h"
+#include "JSString.h"
+#include "ObjectPrototype.h"
+#include "Operations.h"
+#include "Parser.h"
+#include "Profiler.h"
+#include "RegExpObject.h"
+#include "RegExpPrototype.h"
+#include "Register.h"
+#include "SamplingTool.h"
+#include <stdio.h>
+
+using namespace std;
+
+namespace JSC {
+
+#if ENABLE(OPCODE_SAMPLING)
+ #define CTI_SAMPLER ARG_globalData->interpreter->sampler()
+#else
+ #define CTI_SAMPLER 0
+#endif
+
+JITStubs::JITStubs(JSGlobalData* globalData)
+ : m_ctiArrayLengthTrampoline(0)
+ , m_ctiStringLengthTrampoline(0)
+ , m_ctiVirtualCallPreLink(0)
+ , m_ctiVirtualCallLink(0)
+ , m_ctiVirtualCall(0)
+{
+ JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiArrayLengthTrampoline, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallPreLink, &m_ctiVirtualCallLink, &m_ctiVirtualCall);
+}
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+NEVER_INLINE void JITStubs::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValuePtr baseValue, const PutPropertySlot& slot)
+{
+ // The interpreter checks for recursion here; I do not believe this can occur in CTI.
+
+ if (!baseValue.isCell())
+ return;
+
+ // Uncacheable: give up.
+ if (!slot.isCacheable()) {
+ ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic));
+ return;
+ }
+
+ JSCell* baseCell = asCell(baseValue);
+ Structure* structure = baseCell->structure();
+
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic));
+ return;
+ }
+
+ // If baseCell != base, then baseCell must be a proxy for another object.
+ if (baseCell != slot.base()) {
+ ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic));
+ return;
+ }
+
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress);
+
+ // Cache hit: Specialize instruction and ref Structures.
+
+ // Structure transition, cache transition info
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain);
+ JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress);
+ return;
+ }
+
+ stubInfo->initPutByIdReplace(structure);
+
+#if USE(CTI_REPATCH_PIC)
+ JIT::patchPutByIdReplace(stubInfo, structure, slot.cachedOffset(), returnAddress);
+#else
+ JIT::compilePutByIdReplace(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+#endif
+}
+
+NEVER_INLINE void JITStubs::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot& slot)
+{
+ // FIXME: Write a test that proves we need to check for recursion here just
+ // like the interpreter does, then add a check for recursion.
+
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell()) {
+ ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic));
+ return;
+ }
+
+ JSGlobalData* globalData = &callFrame->globalData();
+
+ if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
+#if USE(CTI_REPATCH_PIC)
+ JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, returnAddress);
+#else
+ ctiPatchCallByReturnAddress(returnAddress, globalData->jitStubs.ctiArrayLengthTrampoline());
+#endif
+ return;
+ }
+
+ if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
+ // The tradeoff of compiling a patched inline string length access routine does not seem
+ // to pay off, so we currently only do this for arrays.
+ ctiPatchCallByReturnAddress(returnAddress, globalData->jitStubs.ctiStringLengthTrampoline());
+ return;
+ }
+
+ // Uncacheable: give up.
+ if (!slot.isCacheable()) {
+ ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic));
+ return;
+ }
+
+ JSCell* baseCell = asCell(baseValue);
+ Structure* structure = baseCell->structure();
+
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic));
+ return;
+ }
+
+ // In the interpreter the last structure is trapped here; in CTI we use the
+ // *_second method to achieve a similar (but not quite the same) effect.
+
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress);
+
+ // Cache hit: Specialize instruction and ref Structures.
+
+ if (slot.slotBase() == baseValue) {
+ // Set this up, so derefStructures can do its job.
+ stubInfo->initGetByIdSelf(structure);
+
+#if USE(CTI_REPATCH_PIC)
+ JIT::patchGetByIdSelf(stubInfo, structure, slot.cachedOffset(), returnAddress);
+#else
+ JIT::compileGetByIdSelf(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+#endif
+ return;
+ }
+
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
+ ASSERT(slot.slotBase().isObject());
+
+ JSObject* slotBaseObject = asObject(slot.slotBase());
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+
+ stubInfo->initGetByIdProto(structure, slotBaseObject->structure());
+
+ JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), slot.cachedOffset(), returnAddress);
+ return;
+ }
+
+ size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot);
+ if (!count) {
+ stubInfo->opcodeID = op_get_by_id_generic;
+ return;
+ }
+
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ stubInfo->initGetByIdChain(structure, prototypeChain);
+ JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, slot.cachedOffset(), returnAddress);
+}
+
+#endif
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#define SETUP_VA_LISTL_ARGS va_list vl_args; va_start(vl_args, args)
+#else // JIT_STUB_ARGUMENT_REGISTER or JIT_STUB_ARGUMENT_STACK
+#define SETUP_VA_LISTL_ARGS
+#endif
+
+#ifndef NDEBUG
+
+extern "C" {
+
+static void jscGeneratedNativeCode()
+{
+ // When executing a CTI function (which might do an allocation), we hack the return address
+ // to pretend to be executing this function, to keep stack logging tools from blowing out
+ // memory.
+}
+
+}
+
+struct StackHack {
+ ALWAYS_INLINE StackHack(void** location)
+ {
+ returnAddressLocation = location;
+ savedReturnAddress = *returnAddressLocation;
+ ctiSetReturnAddress(returnAddressLocation, reinterpret_cast<void*>(jscGeneratedNativeCode));
+ }
+ ALWAYS_INLINE ~StackHack()
+ {
+ ctiSetReturnAddress(returnAddressLocation, savedReturnAddress);
+ }
+
+ void** returnAddressLocation;
+ void* savedReturnAddress;
+};
+
+#define BEGIN_STUB_FUNCTION() SETUP_VA_LISTL_ARGS; StackHack stackHack(&STUB_RETURN_ADDRESS_SLOT)
+#define STUB_SET_RETURN_ADDRESS(address) stackHack.savedReturnAddress = address
+#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
+
+#else
+
+#define BEGIN_STUB_FUNCTION() SETUP_VA_LISTL_ARGS
+#define STUB_SET_RETURN_ADDRESS(address) ctiSetReturnAddress(&STUB_RETURN_ADDRESS_SLOT, address);
+#define STUB_RETURN_ADDRESS STUB_RETURN_ADDRESS_SLOT
+
+#endif
+
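
Taken together, the macros above give every stub the shape sketched below. cti_example is hypothetical and shown only to illustrate how BEGIN_STUB_FUNCTION and STUB_RETURN_ADDRESS compose in a debug build:

    // Illustrative shape only; cti_example is not a real stub.
    void JITStubs::cti_example(STUB_ARGS)
    {
        BEGIN_STUB_FUNCTION();  // debug builds install the StackHack here

        // ...stub body; it may repatch its caller via STUB_RETURN_ADDRESS...

    }   // on scope exit the StackHack destructor restores the return address
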
+// The reason this is not inlined is to avoid having to do a PIC branch
+// to get the address of the ctiVMThrowTrampoline function. It's also
+// good to keep the code size down by leaving as much of the exception
+// handling code out of line as possible.
+static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot)
+{
+ ASSERT(globalData->exception);
+ globalData->exceptionLocation = exceptionLocation;
+ ctiSetReturnAddress(&returnAddressSlot, reinterpret_cast<void*>(ctiVMThrowTrampoline));
+}
+
+static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot)
+{
+ globalData->exception = createStackOverflowError(callFrame);
+ returnToThrowTrampoline(globalData, exceptionLocation, returnAddressSlot);
+}
+
+#define VM_THROW_EXCEPTION() \
+ do { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ return 0; \
+ } while (0)
+#define VM_THROW_EXCEPTION_2() \
+ do { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ RETURN_PAIR(0, 0); \
+ } while (0)
+#define VM_THROW_EXCEPTION_AT_END() \
+ returnToThrowTrampoline(ARG_globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS)
+
+#define CHECK_FOR_EXCEPTION() \
+ do { \
+ if (UNLIKELY(ARG_globalData->exception != noValue())) \
+ VM_THROW_EXCEPTION(); \
+ } while (0)
+#define CHECK_FOR_EXCEPTION_AT_END() \
+ do { \
+ if (UNLIKELY(ARG_globalData->exception != noValue())) \
+ VM_THROW_EXCEPTION_AT_END(); \
+ } while (0)
+#define CHECK_FOR_EXCEPTION_VOID() \
+ do { \
+ if (UNLIKELY(ARG_globalData->exception != noValue())) { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ return; \
+ } \
+ } while (0)
+
+JSObject* JITStubs::cti_op_convert_this(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr v1 = ARG_src1;
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSObject* result = v1.toThisObject(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+void JITStubs::cti_op_end(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ ScopeChainNode* scopeChain = ARG_callFrame->scopeChain();
+ ASSERT(scopeChain->refCount > 1);
+ scopeChain->deref();
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_add(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr v1 = ARG_src1;
+ JSValuePtr v2 = ARG_src2;
+
+ double left;
+ double right = 0.0;
+
+ bool rightIsNumber = v2.getNumber(right);
+ if (rightIsNumber && v1.getNumber(left))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, left + right));
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ bool leftIsString = v1.isString();
+ if (leftIsString && v2.isString()) {
+ RefPtr<UString::Rep> value = concatenate(asString(v1)->value().rep(), asString(v2)->value().rep());
+ if (UNLIKELY(!value)) {
+ throwOutOfMemoryError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+
+ return JSValuePtr::encode(jsString(ARG_globalData, value.release()));
+ }
+
+ if (rightIsNumber & leftIsString) {
+ RefPtr<UString::Rep> value = v2.isInt32Fast() ?
+ concatenate(asString(v1)->value().rep(), v2.getInt32Fast()) :
+ concatenate(asString(v1)->value().rep(), right);
+
+ if (UNLIKELY(!value)) {
+ throwOutOfMemoryError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ return JSValuePtr::encode(jsString(ARG_globalData, value.release()));
+ }
+
+ // All other cases are pretty uncommon
+ JSValuePtr result = jsAddSlowCase(callFrame, v1, v2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_pre_inc(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr v = ARG_src1;
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, v.toNumber(callFrame) + 1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+int JITStubs::cti_timeout_check(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSGlobalData* globalData = ARG_globalData;
+ TimeoutChecker& timeoutChecker = globalData->timeoutChecker;
+
+ if (timeoutChecker.didTimeOut(ARG_callFrame)) {
+ globalData->exception = createInterruptedExecutionException(globalData);
+ VM_THROW_EXCEPTION_AT_END();
+ }
+
+ return timeoutChecker.ticksUntilNextCheck();
+}
+
+void JITStubs::cti_register_file_check(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ if (LIKELY(ARG_registerFile->grow(ARG_callFrame + ARG_callFrame->codeBlock()->m_numCalleeRegisters)))
+ return;
+
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ CallFrame* oldCallFrame = ARG_callFrame->callerFrame();
+ ARG_setCallFrame(oldCallFrame);
+ throwStackOverflowError(oldCallFrame, ARG_globalData, oldCallFrame->returnPC(), STUB_RETURN_ADDRESS);
+}
+
+int JITStubs::cti_op_loop_if_less(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+ CallFrame* callFrame = ARG_callFrame;
+
+ bool result = jsLess(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+int JITStubs::cti_op_loop_if_lesseq(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+ CallFrame* callFrame = ARG_callFrame;
+
+ bool result = jsLessEq(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+JSObject* JITStubs::cti_op_new_object(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return constructEmptyObject(ARG_callFrame);
+}
+
+void JITStubs::cti_op_put_by_id_generic(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ PutPropertySlot slot;
+ ARG_src1.put(ARG_callFrame, *ARG_id2, ARG_src3, slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_generic(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ Identifier& ident = *ARG_id2;
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+void JITStubs::cti_op_put_by_id(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ Identifier& ident = *ARG_id2;
+
+ PutPropertySlot slot;
+ ARG_src1.put(callFrame, ident, ARG_src3, slot);
+
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_put_by_id_second));
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+void JITStubs::cti_op_put_by_id_second(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ PutPropertySlot slot;
+ ARG_src1.put(ARG_callFrame, *ARG_id2, ARG_src3, slot);
+ tryCachePutByID(ARG_callFrame, ARG_callFrame->codeBlock(), STUB_RETURN_ADDRESS, ARG_src1, slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+void JITStubs::cti_op_put_by_id_fail(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ Identifier& ident = *ARG_id2;
+
+ PutPropertySlot slot;
+ ARG_src1.put(callFrame, ident, ARG_src3, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ Identifier& ident = *ARG_id2;
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(callFrame, ident, slot);
+
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_second));
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_second(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ Identifier& ident = *ARG_id2;
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(callFrame, ident, slot);
+
+ tryCacheGetByID(callFrame, callFrame->codeBlock(), STUB_RETURN_ADDRESS, baseValue, ident, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_self_fail(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ Identifier& ident = *ARG_id2;
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ if (baseValue.isCell()
+ && slot.isCacheable()
+ && !asCell(baseValue)->structure()->isDictionary()
+ && slot.slotBase() == baseValue) {
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(slot.slotBase().isObject());
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex = 1;
+
+ if (stubInfo->opcodeID == op_get_by_id_self) {
+ ASSERT(!stubInfo->stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(MacroAssembler::CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
+ stubInfo->initGetByIdSelfList(polymorphicStructureList, 2);
+ } else {
+ polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
+ listIndex = stubInfo->u.getByIdSelfList.listSize;
+ stubInfo->u.getByIdSelfList.listSize++;
+ }
+
+ JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, asCell(baseValue)->structure(), slot.cachedOffset());
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_generic));
+ } else {
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_generic));
+ }
+ return JSValuePtr::encode(result);
+}
+
+static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(StructureStubInfo* stubInfo, int& listIndex)
+{
+ PolymorphicAccessStructureList* prototypeStructureList = 0;
+ listIndex = 1;
+
+ switch (stubInfo->opcodeID) {
+ case op_get_by_id_proto:
+ prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure);
+ stubInfo->stubRoutine.reset();
+ stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
+ break;
+ case op_get_by_id_chain:
+ prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain);
+ stubInfo->stubRoutine.reset();
+ stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
+ break;
+ case op_get_by_id_proto_list:
+ prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
+ listIndex = stubInfo->u.getByIdProtoList.listSize;
+ stubInfo->u.getByIdProtoList.listSize++;
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
+ return prototypeStructureList;
+}
+
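
A standalone model of the list discipline this helper manages, with illustrative names: kListCacheSize stands in for POLYMORPHIC_LIST_CACHE_SIZE, and JSC stores code locations and retains the Structures, which this sketch omits:

    struct Structure;
    const int kListCacheSize = 8; // stands in for POLYMORPHIC_LIST_CACHE_SIZE

    struct PolymorphicListModel {
        struct Entry { Structure* structure; void* stubEntry; };
        Entry list[kListCacheSize];
        int size;
    };

    // Each miss appends one entry; once the list is full the call site is
    // repatched to the *_list_full or generic stub, as in the callers above.
    bool addEntry(PolymorphicListModel& cache, Structure* structure, void* stubEntry)
    {
        if (cache.size == kListCacheSize)
            return false;
        cache.list[cache.size].structure = structure;
        cache.list[cache.size].stubEntry = stubEntry;
        ++cache.size;
        return true;
    }
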
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_list(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(callFrame, *ARG_id2, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail));
+ return JSValuePtr::encode(result);
+ }
+
+ Structure* structure = asCell(baseValue)->structure();
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(slot.slotBase().isObject());
+ JSObject* slotBaseObject = asObject(slot.slotBase());
+
+ if (slot.slotBase() == baseValue)
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail));
+ else if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) {
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+
+ int listIndex;
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
+
+ JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), slot.cachedOffset());
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_list_full));
+ } else if (size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot)) {
+ int listIndex;
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
+ JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, structure->prototypeChain(callFrame), count, slot.cachedOffset());
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_list_full));
+ } else
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail));
+
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_list_full(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_fail(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_array_fail(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_string_fail(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr baseValue = ARG_src1;
+ PropertySlot slot(baseValue);
+ JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+#endif
+
+JSValueEncodedAsPointer* JITStubs::cti_op_instanceof(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr value = ARG_src1;
+ JSValuePtr baseVal = ARG_src2;
+ JSValuePtr proto = ARG_src3;
+
+ // at least one of these checks must have failed to get to the slow case
+ ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
+ || !value.isObject() || !baseVal.isObject() || !proto.isObject()
+ || (asObject(baseVal)->structure()->typeInfo().flags() & (ImplementsHasInstance | OverridesHasInstance)) != ImplementsHasInstance);
+
+ if (!baseVal.isObject()) {
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ ARG_globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
+
+ JSObject* baseObj = asObject(baseVal);
+ TypeInfo typeInfo = baseObj->structure()->typeInfo();
+ if (!typeInfo.implementsHasInstance())
+ return JSValuePtr::encode(jsBoolean(false));
+
+ if (!typeInfo.overridesHasInstance()) {
+ if (!proto.isObject()) {
+ throwError(callFrame, TypeError, "instanceof called on an object with an invalid prototype property.");
+ VM_THROW_EXCEPTION();
+ }
+
+ if (!value.isObject())
+ return JSValuePtr::encode(jsBoolean(false));
+ }
+
+ JSValuePtr result = jsBoolean(baseObj->hasInstance(callFrame, value, proto));
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_del_by_id(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSObject* baseObj = ARG_src1.toObject(callFrame);
+
+ JSValuePtr result = jsBoolean(baseObj->deleteProperty(callFrame, *ARG_id2));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_mul(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, left * right));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) * src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSObject* JITStubs::cti_op_new_func(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return ARG_func1->makeFunction(ARG_callFrame, ARG_callFrame->scopeChain());
+}
+
+void* JITStubs::cti_op_call_JSFunction(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+#ifndef NDEBUG
+ CallData callData;
+ ASSERT(ARG_src1.getCallData(callData) == CallTypeJS);
+#endif
+
+ ScopeChainNode* callDataScopeChain = asFunction(ARG_src1)->scope().node();
+ CodeBlock* newCodeBlock = &asFunction(ARG_src1)->body()->bytecode(callDataScopeChain);
+
+ if (!newCodeBlock->jitCode())
+ JIT::compile(ARG_globalData, newCodeBlock);
+
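+    // Return the callee's CodeBlock to the generated call sequence, which
+    // passes it on to cti_op_call_arityCheck when the argument count does
+    // not match.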
+ return newCodeBlock;
+}
+
+VoidPtrPair JITStubs::cti_op_call_arityCheck(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* newCodeBlock = ARG_codeBlock4;
+ int argCount = ARG_int3;
+
+ ASSERT(argCount != newCodeBlock->m_numParameters);
+
+ CallFrame* oldCallFrame = callFrame->callerFrame();
+
+ if (argCount > newCodeBlock->m_numParameters) {
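+        // Too many arguments were passed: copy the first numParameters
+        // arguments up into the new frame's parameter slots, leaving the
+        // extra arguments behind, and slide the frame forward to match.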
+ size_t numParameters = newCodeBlock->m_numParameters;
+ Register* r = callFrame->registers() + numParameters;
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
+ for (size_t i = 0; i < numParameters; ++i)
+ argv[i + argCount] = argv[i];
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ } else {
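+        // Too few arguments were passed: slide the frame up by the number of
+        // missing parameters, growing the register file, and fill the gap
+        // with undefined.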
+ size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
+ Register* r = callFrame->registers() + omittedArgCount;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!ARG_registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ ARG_setCallFrame(oldCallFrame);
+ throwStackOverflowError(oldCallFrame, ARG_globalData, ARG_returnAddress2, STUB_RETURN_ADDRESS);
+ RETURN_PAIR(0, 0);
+ }
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
+ for (size_t i = 0; i < omittedArgCount; ++i)
+ argv[i] = jsUndefined();
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ }
+
+ RETURN_PAIR(newCodeBlock, callFrame);
+}
+
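+// Call linking. An unlinked JS call site initially calls out through a
+// trampoline; the two stubs below are its slow cases. cti_vm_lazyLinkCall
+// compiles the callee if necessary and then binds the call site directly to
+// the callee's code, while cti_vm_dontLazyLinkCall compiles the callee but
+// repoints the site at the generic ctiVirtualCallLink trampoline instead of
+// binding it.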
+void* JITStubs::cti_vm_dontLazyLinkCall(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSGlobalData* globalData = ARG_globalData;
+ JSFunction* callee = asFunction(ARG_src1);
+ CodeBlock* codeBlock = &callee->body()->bytecode(callee->scope().node());
+ if (!codeBlock->jitCode())
+ JIT::compile(globalData, codeBlock);
+
+ ctiPatchNearCallByReturnAddress(ARG_returnAddress2, globalData->jitStubs.ctiVirtualCallLink());
+
+ return codeBlock->jitCode().addressForCall();
+}
+
+void* JITStubs::cti_vm_lazyLinkCall(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSFunction* callee = asFunction(ARG_src1);
+ CodeBlock* codeBlock = &callee->body()->bytecode(callee->scope().node());
+ if (!codeBlock->jitCode())
+ JIT::compile(ARG_globalData, codeBlock);
+
+ CallLinkInfo* callLinkInfo = &ARG_callFrame->callerFrame()->codeBlock()->getCallLinkInfo(ARG_returnAddress2);
+ JIT::linkCall(callee, codeBlock, codeBlock->jitCode(), callLinkInfo, ARG_int3);
+
+ return codeBlock->jitCode().addressForCall();
+}
+
+JSObject* JITStubs::cti_op_push_activation(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSActivation* activation = new (ARG_globalData) JSActivation(ARG_callFrame, static_cast<FunctionBodyNode*>(ARG_callFrame->codeBlock()->ownerNode()));
+ ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->copy()->push(activation));
+ return activation;
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_call_NotJSFunction(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr funcVal = ARG_src1;
+
+ CallData callData;
+ CallType callType = funcVal.getCallData(callData);
+
+ ASSERT(callType != CallTypeJS);
+
+ if (callType == CallTypeHost) {
+ int registerOffset = ARG_int2;
+ int argCount = ARG_int3;
+ CallFrame* previousCallFrame = ARG_callFrame;
+ CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
+
+ callFrame->init(0, static_cast<Instruction*>(STUB_RETURN_ADDRESS), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0);
+ ARG_setCallFrame(callFrame);
+
+ Register* argv = ARG_callFrame->registers() - RegisterFile::CallFrameHeaderSize - argCount;
+ ArgList argList(argv + 1, argCount - 1);
+
+ JSValuePtr returnValue;
+ {
+ SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
+
+ // FIXME: All host methods should be calling toThisObject, but this is not presently the case.
+ JSValuePtr thisValue = argv[0].jsValue(callFrame);
+ if (thisValue == jsNull())
+ thisValue = callFrame->globalThisValue();
+
+ returnValue = callData.native.function(callFrame, asObject(funcVal), thisValue, argList);
+ }
+ ARG_setCallFrame(previousCallFrame);
+ CHECK_FOR_EXCEPTION();
+
+ return JSValuePtr::encode(returnValue);
+ }
+
+ ASSERT(callType == CallTypeNone);
+
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ ARG_globalData->exception = createNotAFunctionError(ARG_callFrame, funcVal, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+}
+
+void JITStubs::cti_op_create_arguments(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ Arguments* arguments = new (ARG_globalData) Arguments(ARG_callFrame);
+ ARG_callFrame->setCalleeArguments(arguments);
+ ARG_callFrame[RegisterFile::ArgumentsRegister] = arguments;
+}
+
+void JITStubs::cti_op_create_arguments_no_params(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ Arguments* arguments = new (ARG_globalData) Arguments(ARG_callFrame, Arguments::NoParameters);
+ ARG_callFrame->setCalleeArguments(arguments);
+ ARG_callFrame[RegisterFile::ArgumentsRegister] = arguments;
+}
+
+void JITStubs::cti_op_tear_off_activation(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ ASSERT(ARG_callFrame->codeBlock()->needsFullScopeChain());
+ asActivation(ARG_src1)->copyRegisters(ARG_callFrame->optionalCalleeArguments());
+}
+
+void JITStubs::cti_op_tear_off_arguments(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ ASSERT(ARG_callFrame->codeBlock()->usesArguments() && !ARG_callFrame->codeBlock()->needsFullScopeChain());
+ ARG_callFrame->optionalCalleeArguments()->copyRegisters();
+}
+
+void JITStubs::cti_op_profile_will_call(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ ASSERT(*ARG_profilerReference);
+ (*ARG_profilerReference)->willExecute(ARG_callFrame, ARG_src1);
+}
+
+void JITStubs::cti_op_profile_did_call(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ ASSERT(*ARG_profilerReference);
+ (*ARG_profilerReference)->didExecute(ARG_callFrame, ARG_src1);
+}
+
+void JITStubs::cti_op_ret_scopeChain(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ ASSERT(ARG_callFrame->codeBlock()->needsFullScopeChain());
+ ARG_callFrame->scopeChain()->deref();
+}
+
+JSObject* JITStubs::cti_op_new_array(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ ArgList argList(&ARG_callFrame->registers()[ARG_int1], ARG_int2);
+ return constructArray(ARG_callFrame, argList);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_resolve(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+
+ Identifier& ident = *ARG_id1;
+ do {
+ JSObject* o = *iter;
+ PropertySlot slot(o);
+ if (o->getPropertySlot(callFrame, ident, slot)) {
+ JSValuePtr result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+ }
+ } while (++iter != end);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+}
+
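+// Allocate the this object for a JS constructor call. If the prototype value
+// (src4) is an object, the new object is given that object's cached
+// inheritorID structure; otherwise it falls back to the global object's
+// empty object structure.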
+JSObject* JITStubs::cti_op_construct_JSConstruct(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+#ifndef NDEBUG
+ ConstructData constructData;
+ ASSERT(asFunction(ARG_src1)->getConstructData(constructData) == ConstructTypeJS);
+#endif
+
+ Structure* structure;
+ if (ARG_src4.isObject())
+ structure = asObject(ARG_src4)->inheritorID();
+ else
+ structure = asFunction(ARG_src1)->scope().node()->globalObject()->emptyObjectStructure();
+ return new (ARG_globalData) JSObject(structure);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_construct_NotJSConstruct(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr constrVal = ARG_src1;
+ int argCount = ARG_int3;
+ int thisRegister = ARG_int5;
+
+ ConstructData constructData;
+ ConstructType constructType = constrVal.getConstructData(constructData);
+
+ if (constructType == ConstructTypeHost) {
+ ArgList argList(callFrame->registers() + thisRegister + 1, argCount - 1);
+
+ JSValuePtr returnValue;
+ {
+ SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
+ returnValue = constructData.native.function(callFrame, asObject(constrVal), argList);
+ }
+ CHECK_FOR_EXCEPTION();
+
+ return JSValuePtr::encode(returnValue);
+ }
+
+ ASSERT(constructType == ConstructTypeNone);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ ARG_globalData->exception = createNotAConstructorError(callFrame, constrVal, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_val(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSGlobalData* globalData = ARG_globalData;
+
+ JSValuePtr baseValue = ARG_src1;
+ JSValuePtr subscript = ARG_src2;
+
+ JSValuePtr result;
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSArray(globalData, baseValue)) {
+ JSArray* jsArray = asArray(baseValue);
+ if (jsArray->canGetIndex(i))
+ result = jsArray->getIndex(i);
+ else
+ result = jsArray->JSArray::get(callFrame, i);
+ } else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
+ result = asString(baseValue)->getIndex(ARG_globalData, i);
+ else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+            // All fast byte array accesses are safe from exceptions, so return immediately to avoid exception checks.
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_val_byte_array));
+ return JSValuePtr::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ } else
+ result = baseValue.get(callFrame, i);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
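+// Byte array specialization of cti_op_get_by_val, installed by the patch in
+// the generic stub above; it patches the call site back to the generic stub
+// once the base value is no longer a byte array.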
+JSValueEncodedAsPointer* JITStubs::cti_op_get_by_val_byte_array(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSGlobalData* globalData = ARG_globalData;
+
+ JSValuePtr baseValue = ARG_src1;
+ JSValuePtr subscript = ARG_src2;
+
+ JSValuePtr result;
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+            // All fast byte array accesses are safe from exceptions, so return immediately to avoid exception checks.
+ return JSValuePtr::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ }
+
+ result = baseValue.get(callFrame, i);
+ if (!isJSByteArray(globalData, baseValue))
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_val));
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+VoidPtrPair JITStubs::cti_op_resolve_func(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+
+ // FIXME: add scopeDepthIsZero optimization
+
+ ASSERT(iter != end);
+
+ Identifier& ident = *ARG_id1;
+ JSObject* base;
+ do {
+ base = *iter;
+ PropertySlot slot(base);
+ if (base->getPropertySlot(callFrame, ident, slot)) {
+            // ECMA 11.2.3 says that if we hit an activation the this value should be null.
+            // However, section 10.2.3 says that where the value provided by the caller
+            // is null, the global object should be used. It also says that this section
+            // does not apply to internal functions, but for simplicity of implementation
+            // we use the global object here anyway. This guarantees that host functions
+            // always receive a valid object as their this value. We also handle wrapper
+            // substitution for the global object at the same time.
+ JSObject* thisObj = base->toThisObject(callFrame);
+ JSValuePtr result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ RETURN_PAIR(thisObj, JSValuePtr::encode(result));
+ }
+ ++iter;
+ } while (iter != end);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION_2();
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_sub(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, left - right));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) - src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+void JITStubs::cti_op_put_by_val(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSGlobalData* globalData = ARG_globalData;
+
+ JSValuePtr baseValue = ARG_src1;
+ JSValuePtr subscript = ARG_src2;
+ JSValuePtr value = ARG_src3;
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSArray(globalData, baseValue)) {
+ JSArray* jsArray = asArray(baseValue);
+ if (jsArray->canSetIndex(i))
+ jsArray->setIndex(i, value);
+ else
+ jsArray->JSArray::put(callFrame, i, value);
+ } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSByteArray* jsByteArray = asByteArray(baseValue);
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_put_by_val_byte_array));
+            // All fast byte array accesses are safe from exceptions, so return immediately to avoid exception checks.
+            if (value.isInt32Fast()) {
+                jsByteArray->setIndex(i, value.getInt32Fast());
+                return;
+            }
+            double dValue = 0;
+            if (value.getNumber(dValue)) {
+                jsByteArray->setIndex(i, dValue);
+                return;
+            }
+
+ baseValue.put(callFrame, i, value);
+ } else
+ baseValue.put(callFrame, i, value);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot;
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+void JITStubs::cti_op_put_by_val_array(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr baseValue = ARG_src1;
+ int i = ARG_int2;
+ JSValuePtr value = ARG_src3;
+
+ ASSERT(isJSArray(ARG_globalData, baseValue));
+
+ if (LIKELY(i >= 0))
+ asArray(baseValue)->JSArray::put(callFrame, i, value);
+ else {
+        // This should succeed, since we are re-boxing an immediate that was unboxed in JIT code.
+        ASSERT(JSValuePtr::makeInt32Fast(i));
+        Identifier property(callFrame, JSValuePtr::makeInt32Fast(i).toString(callFrame));
+        // FIXME: Can toString throw an exception here?
+ if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot;
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
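+// Byte array specialization of cti_op_put_by_val, installed by the patch in
+// the generic stub above; it patches the call site back to the generic stub
+// once the base value is no longer a byte array.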
+void JITStubs::cti_op_put_by_val_byte_array(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSGlobalData* globalData = ARG_globalData;
+
+ JSValuePtr baseValue = ARG_src1;
+ JSValuePtr subscript = ARG_src2;
+ JSValuePtr value = ARG_src3;
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSByteArray* jsByteArray = asByteArray(baseValue);
+
+            // All fast byte array accesses are safe from exceptions, so return immediately to avoid exception checks.
+            if (value.isInt32Fast()) {
+                jsByteArray->setIndex(i, value.getInt32Fast());
+                return;
+            }
+            double dValue = 0;
+            if (value.getNumber(dValue)) {
+                jsByteArray->setIndex(i, dValue);
+                return;
+            }
+ }
+
+ if (!isJSByteArray(globalData, baseValue))
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_put_by_val));
+ baseValue.put(callFrame, i, value);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot;
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_lesseq(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsBoolean(jsLessEq(callFrame, ARG_src1, ARG_src2));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+int JITStubs::cti_op_loop_if_true(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ bool result = src1.toBoolean(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_negate(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src = ARG_src1;
+
+ double v;
+ if (src.getNumber(v))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, -v));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, -src.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_resolve_base(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return JSValuePtr::encode(JSC::resolveBase(ARG_callFrame, *ARG_id1, ARG_callFrame->scopeChain()));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_resolve_skip(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ int skip = ARG_int2;
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+ while (skip--) {
+ ++iter;
+ ASSERT(iter != end);
+ }
+ Identifier& ident = *ARG_id1;
+ do {
+ JSObject* o = *iter;
+ PropertySlot slot(o);
+ if (o->getPropertySlot(callFrame, ident, slot)) {
+ JSValuePtr result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+ }
+ } while (++iter != end);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_resolve_global(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSGlobalObject* globalObject = asGlobalObject(ARG_src1);
+ Identifier& ident = *ARG_id2;
+ unsigned globalResolveInfoIndex = ARG_int3;
+ ASSERT(globalObject->isGlobalObject());
+
+ PropertySlot slot(globalObject);
+ if (globalObject->getPropertySlot(callFrame, ident, slot)) {
+ JSValuePtr result = slot.getValue(callFrame, ident);
+ if (slot.isCacheable() && !globalObject->structure()->isDictionary()) {
+ GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
+ if (globalResolveInfo.structure)
+ globalResolveInfo.structure->deref();
+ globalObject->structure()->ref();
+ globalResolveInfo.structure = globalObject->structure();
+ globalResolveInfo.offset = slot.cachedOffset();
+ return JSValuePtr::encode(result);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+ }
+
+ unsigned vPCIndex = callFrame->codeBlock()->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, callFrame->codeBlock());
+ VM_THROW_EXCEPTION();
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_div(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, left / right));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) / src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_pre_dec(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr v = ARG_src1;
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, v.toNumber(callFrame) - 1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+int JITStubs::cti_op_jless(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+ CallFrame* callFrame = ARG_callFrame;
+
+ bool result = jsLess(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_not(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src = ARG_src1;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr result = jsBoolean(!src.toBoolean(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+int JITStubs::cti_op_jtrue(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ bool result = src1.toBoolean(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+VoidPtrPair JITStubs::cti_op_post_inc(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr v = ARG_src1;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr number = v.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ RETURN_PAIR(JSValuePtr::encode(number), JSValuePtr::encode(jsNumber(ARG_globalData, number.uncheckedGetNumber() + 1)));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_eq(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ ASSERT(!JSValuePtr::areBothInt32Fast(src1, src2));
+ JSValuePtr result = jsBoolean(JSValuePtr::equalSlowCaseInline(callFrame, src1, src2));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_lshift(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr val = ARG_src1;
+ JSValuePtr shift = ARG_src2;
+
+ int32_t left;
+ uint32_t right;
+ if (JSValuePtr::areBothInt32Fast(val, shift))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, val.getInt32Fast() << (shift.getInt32Fast() & 0x1f)));
+ if (val.numberToInt32(left) && shift.numberToUInt32(right))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, left << (right & 0x1f)));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, (val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_bitand(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ int32_t left;
+ int32_t right;
+ if (src1.numberToInt32(left) && src2.numberToInt32(right))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, left & right));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_rshift(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr val = ARG_src1;
+ JSValuePtr shift = ARG_src2;
+
+ int32_t left;
+ uint32_t right;
+ if (JSFastMath::canDoFastRshift(val, shift))
+ return JSValuePtr::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
+ if (val.numberToInt32(left) && shift.numberToUInt32(right))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, left >> (right & 0x1f)));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, (val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_bitnot(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src = ARG_src1;
+
+ int value;
+ if (src.numberToInt32(value))
+ return JSValuePtr::encode(jsNumber(ARG_globalData, ~value));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsNumber(ARG_globalData, ~src.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+VoidPtrPair JITStubs::cti_op_resolve_with_base(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+
+ // FIXME: add scopeDepthIsZero optimization
+
+ ASSERT(iter != end);
+
+ Identifier& ident = *ARG_id1;
+ JSObject* base;
+ do {
+ base = *iter;
+ PropertySlot slot(base);
+ if (base->getPropertySlot(callFrame, ident, slot)) {
+ JSValuePtr result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ RETURN_PAIR(base, JSValuePtr::encode(result));
+ }
+ ++iter;
+ } while (iter != end);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION_2();
+}
+
+JSObject* JITStubs::cti_op_new_func_exp(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return ARG_funcexp1->makeFunction(ARG_callFrame, ARG_callFrame->scopeChain());
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_mod(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr dividendValue = ARG_src1;
+ JSValuePtr divisorValue = ARG_src2;
+
+ CallFrame* callFrame = ARG_callFrame;
+ double d = dividendValue.toNumber(callFrame);
+ JSValuePtr result = jsNumber(ARG_globalData, fmod(d, divisorValue.toNumber(callFrame)));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_less(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsBoolean(jsLess(callFrame, ARG_src1, ARG_src2));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_neq(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ ASSERT(!JSValuePtr::areBothInt32Fast(src1, src2));
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr result = jsBoolean(!JSValuePtr::equalSlowCaseInline(callFrame, src1, src2));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+VoidPtrPair JITStubs::cti_op_post_dec(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr v = ARG_src1;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr number = v.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ RETURN_PAIR(JSValuePtr::encode(number), JSValuePtr::encode(jsNumber(ARG_globalData, number.uncheckedGetNumber() - 1)));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_urshift(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr val = ARG_src1;
+ JSValuePtr shift = ARG_src2;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+    if (JSFastMath::canDoFastUrshift(val, shift))
+        return JSValuePtr::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
+
+    JSValuePtr result = jsNumber(ARG_globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+    CHECK_FOR_EXCEPTION_AT_END();
+    return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_bitxor(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSObject* JITStubs::cti_op_new_regexp(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return new (ARG_globalData) RegExpObject(ARG_callFrame->lexicalGlobalObject()->regExpStructure(), ARG_regexp1);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_bitor(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) | src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_call_eval(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ RegisterFile* registerFile = ARG_registerFile;
+
+ Interpreter* interpreter = ARG_globalData->interpreter;
+
+ JSValuePtr funcVal = ARG_src1;
+ int registerOffset = ARG_int2;
+ int argCount = ARG_int3;
+
+ Register* newCallFrame = callFrame->registers() + registerOffset;
+ Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount;
+ JSValuePtr thisValue = argv[0].jsValue(callFrame);
+ JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject();
+
+ if (thisValue == globalObject && funcVal == globalObject->evalFunction()) {
+ JSValuePtr exceptionValue = noValue();
+ JSValuePtr result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset, exceptionValue);
+ if (UNLIKELY(exceptionValue != noValue())) {
+ ARG_globalData->exception = exceptionValue;
+ VM_THROW_EXCEPTION_AT_END();
+ }
+ return JSValuePtr::encode(result);
+ }
+
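+    // Not a genuine call to the global eval; return the impossible-value
+    // sentinel, which the generated code treats as "perform a normal call".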
+ return JSValuePtr::encode(jsImpossibleValue());
+}
+
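+// Throw an exception. After unwinding to a handler we do not return to the
+// call site; instead the stub's own return address is overwritten so that
+// returning from the stub lands in the handler's catch routine.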
+JSValueEncodedAsPointer* JITStubs::cti_op_throw(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+
+ JSValuePtr exceptionValue = ARG_src1;
+ ASSERT(exceptionValue);
+
+ HandlerInfo* handler = ARG_globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, true);
+
+ if (!handler) {
+ *ARG_exception = exceptionValue;
+ return JSValuePtr::encode(jsNull());
+ }
+
+ ARG_setCallFrame(callFrame);
+ void* catchRoutine = handler->nativeCode.addressForExceptionHandler();
+ ASSERT(catchRoutine);
+ STUB_SET_RETURN_ADDRESS(catchRoutine);
+ return JSValuePtr::encode(exceptionValue);
+}
+
+JSPropertyNameIterator* JITStubs::cti_op_get_pnames(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return JSPropertyNameIterator::create(ARG_callFrame, ARG_src1);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_next_pname(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSPropertyNameIterator* it = ARG_pni1;
+ JSValuePtr temp = it->next(ARG_callFrame);
+ if (!temp)
+ it->invalidate();
+ return JSValuePtr::encode(temp);
+}
+
+JSObject* JITStubs::cti_op_push_scope(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSObject* o = ARG_src1.toObject(ARG_callFrame);
+ CHECK_FOR_EXCEPTION();
+ ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->push(o));
+ return o;
+}
+
+void JITStubs::cti_op_pop_scope(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->pop());
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_typeof(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return JSValuePtr::encode(jsTypeStringForValue(ARG_callFrame, ARG_src1));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_is_undefined(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr v = ARG_src1;
+ return JSValuePtr::encode(jsBoolean(v.isCell() ? v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined()));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_is_boolean(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return JSValuePtr::encode(jsBoolean(ARG_src1.isBoolean()));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_is_number(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return JSValuePtr::encode(jsBoolean(ARG_src1.isNumber()));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_is_string(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return JSValuePtr::encode(jsBoolean(isJSString(ARG_globalData, ARG_src1)));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_is_object(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return JSValuePtr::encode(jsBoolean(jsIsObjectType(ARG_src1)));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_is_function(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ return JSValuePtr::encode(jsBoolean(jsIsFunctionType(ARG_src1)));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_stricteq(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ return JSValuePtr::encode(jsBoolean(JSValuePtr::strictEqual(src1, src2)));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_nstricteq(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src1 = ARG_src1;
+ JSValuePtr src2 = ARG_src2;
+
+ return JSValuePtr::encode(jsBoolean(!JSValuePtr::strictEqual(src1, src2)));
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_to_jsnumber(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr src = ARG_src1;
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr result = src.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_in(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ JSValuePtr baseVal = ARG_src2;
+
+    if (!baseVal.isObject()) {
+        CodeBlock* codeBlock = callFrame->codeBlock();
+        unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+        ARG_globalData->exception = createInvalidParamError(callFrame, "in", baseVal, vPCIndex, codeBlock);
+        VM_THROW_EXCEPTION();
+    }
+
+ JSValuePtr propName = ARG_src1;
+ JSObject* baseObj = asObject(baseVal);
+
+ uint32_t i;
+ if (propName.getUInt32(i))
+ return JSValuePtr::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));
+
+ Identifier property(callFrame, propName.toString(callFrame));
+ CHECK_FOR_EXCEPTION();
+ return JSValuePtr::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
+}
+
+JSObject* JITStubs::cti_op_push_new_scope(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSObject* scope = new (ARG_globalData) JSStaticScopeObject(ARG_callFrame, *ARG_id1, ARG_src2, DontDelete);
+
+ CallFrame* callFrame = ARG_callFrame;
+ callFrame->setScopeChain(callFrame->scopeChain()->push(scope));
+ return scope;
+}
+
+void JITStubs::cti_op_jmp_scopes(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ unsigned count = ARG_int1;
+ CallFrame* callFrame = ARG_callFrame;
+
+ ScopeChainNode* tmp = callFrame->scopeChain();
+ while (count--)
+ tmp = tmp->pop();
+ callFrame->setScopeChain(tmp);
+}
+
+void JITStubs::cti_op_put_by_index(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ unsigned property = ARG_int2;
+
+ ARG_src1.put(callFrame, property, ARG_src3);
+}
+
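+// Immediate (integer) switch: look the scrutinee up in the code block's jump
+// table. A number with an exact int32 value is treated as that int32; any
+// other value takes the default target.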
+void* JITStubs::cti_op_switch_imm(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr scrutinee = ARG_src1;
+ unsigned tableIndex = ARG_int2;
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+    if (scrutinee.isInt32Fast())
+        return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.getInt32Fast()).addressForSwitch();
+
+    double value;
+    int32_t intValue;
+    if (scrutinee.getNumber(value) && ((intValue = static_cast<int32_t>(value)) == value))
+        return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).addressForSwitch();
+    return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch();
+}
+
+void* JITStubs::cti_op_switch_char(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr scrutinee = ARG_src1;
+ unsigned tableIndex = ARG_int2;
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch();
+
+ if (scrutinee.isString()) {
+ UString::Rep* value = asString(scrutinee)->value().rep();
+ if (value->size() == 1)
+ result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).addressForSwitch();
+ }
+
+ return result;
+}
+
+void* JITStubs::cti_op_switch_string(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ JSValuePtr scrutinee = ARG_src1;
+ unsigned tableIndex = ARG_int2;
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch();
+
+ if (scrutinee.isString()) {
+ UString::Rep* value = asString(scrutinee)->value().rep();
+ result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).addressForSwitch();
+ }
+
+ return result;
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_op_del_by_val(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ JSValuePtr baseValue = ARG_src1;
+ JSObject* baseObj = baseValue.toObject(callFrame); // may throw
+
+ JSValuePtr subscript = ARG_src2;
+ JSValuePtr result;
+ uint32_t i;
+ if (subscript.getUInt32(i))
+ result = jsBoolean(baseObj->deleteProperty(callFrame, i));
+ else {
+ CHECK_FOR_EXCEPTION();
+ Identifier property(callFrame, subscript.toString(callFrame));
+ CHECK_FOR_EXCEPTION();
+ result = jsBoolean(baseObj->deleteProperty(callFrame, property));
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValuePtr::encode(result);
+}
+
+void JITStubs::cti_op_put_getter(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ ASSERT(ARG_src1.isObject());
+ JSObject* baseObj = asObject(ARG_src1);
+ ASSERT(ARG_src3.isObject());
+ baseObj->defineGetter(callFrame, *ARG_id2, asObject(ARG_src3));
+}
+
+void JITStubs::cti_op_put_setter(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ ASSERT(ARG_src1.isObject());
+ JSObject* baseObj = asObject(ARG_src1);
+ ASSERT(ARG_src3.isObject());
+ baseObj->defineSetter(callFrame, *ARG_id2, asObject(ARG_src3));
+}
+
+JSObject* JITStubs::cti_op_new_error(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned type = ARG_int1;
+ JSValuePtr message = ARG_src2;
+ unsigned bytecodeOffset = ARG_int3;
+
+ unsigned lineNumber = codeBlock->lineNumberForBytecodeOffset(callFrame, bytecodeOffset);
+ return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerNode()->sourceID(), codeBlock->ownerNode()->sourceURL());
+}
+
+void JITStubs::cti_op_debug(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+
+ int debugHookID = ARG_int1;
+ int firstLine = ARG_int2;
+ int lastLine = ARG_int3;
+
+ ARG_globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
+}
+
+JSValueEncodedAsPointer* JITStubs::cti_vm_throw(STUB_ARGS)
+{
+ BEGIN_STUB_FUNCTION();
+
+ CallFrame* callFrame = ARG_callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ JSGlobalData* globalData = ARG_globalData;
+
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, globalData->exceptionLocation);
+
+ JSValuePtr exceptionValue = globalData->exception;
+ ASSERT(exceptionValue);
+ globalData->exception = noValue();
+
+ HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, false);
+
+ if (!handler) {
+ *ARG_exception = exceptionValue;
+ return JSValuePtr::encode(jsNull());
+ }
+
+ ARG_setCallFrame(callFrame);
+ void* catchRoutine = handler->nativeCode.addressForExceptionHandler();
+ ASSERT(catchRoutine);
+ STUB_SET_RETURN_ADDRESS(catchRoutine);
+ return JSValuePtr::encode(exceptionValue);
+}
+
+#undef STUB_RETURN_ADDRESS
+#undef STUB_SET_RETURN_ADDRESS
+#undef BEGIN_STUB_FUNCTION
+#undef CHECK_FOR_EXCEPTION
+#undef CHECK_FOR_EXCEPTION_AT_END
+#undef CHECK_FOR_EXCEPTION_VOID
+#undef VM_THROW_EXCEPTION
+#undef VM_THROW_EXCEPTION_2
+#undef VM_THROW_EXCEPTION_AT_END
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
new file mode 100644
index 0000000..b7b8f35
--- /dev/null
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubs_h
+#define JITStubs_h
+
+#include "Register.h"
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+    class CodeBlock;
+    class ExecutablePool;
+    class Identifier;
+    class JSGlobalData;
+    class JSObject;
+    class JSPropertyNameIterator;
+    class JSValueEncodedAsPointer;
+    class JSValuePtr;
+    class PropertySlot;
+    class PutPropertySlot;
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+ #define STUB_ARGS void* args, ...
+ #define ARGS (reinterpret_cast<void**>(vl_args) - 1)
+#else // JIT_STUB_ARGUMENT_REGISTER or JIT_STUB_ARGUMENT_STACK
+ #define STUB_ARGS void** args
+ #define ARGS (args)
+#endif
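+
+// Under the va_list configuration a stub is declared as f(void* args, ...)
+// and ARGS recovers the argument array from vl_args, a va_list presumably
+// initialized at the top of each stub; in the register and stack
+// configurations the argument array pointer is passed directly.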
+
+#if USE(JIT_STUB_ARGUMENT_REGISTER)
+ #if PLATFORM(X86_64)
+ #define JIT_STUB
+ #elif COMPILER(MSVC)
+ #define JIT_STUB __fastcall
+ #elif COMPILER(GCC)
+ #define JIT_STUB __attribute__ ((fastcall))
+ #else
+ #error Need to support register calling convention in this compiler
+ #endif
+#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
+ #if COMPILER(MSVC)
+ #define JIT_STUB __cdecl
+ #else
+ #define JIT_STUB
+ #endif
+#endif
+
+// The Mac compilers are fine with returning this two-pointer struct by value;
+// elsewhere the pair is packed into a uint64_t, presumably so that it comes
+// back in registers rather than through a hidden struct-return pointer.
+#if PLATFORM(MAC)
+ struct VoidPtrPair {
+ void* first;
+ void* second;
+ };
+#define RETURN_PAIR(a,b) VoidPtrPair pair = { a, b }; return pair
+#else
+ typedef uint64_t VoidPtrPair;
+ union VoidPtrPairValue {
+ struct { void* first; void* second; } s;
+ VoidPtrPair i;
+ };
+#define RETURN_PAIR(a,b) VoidPtrPairValue pair = {{ a, b }}; return pair.i
+#endif
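+
+// A stub returning two machine words uses RETURN_PAIR, e.g.
+//     RETURN_PAIR(newCodeBlock, callFrame);
+// as in cti_op_call_arityCheck in JITStubs.cpp.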
+
+ class JITStubs {
+ public:
+ JITStubs(JSGlobalData*);
+
+ static JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_convert_this(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_new_array(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_new_error(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_new_func(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_new_object(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS);
+ static JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS);
+ static JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_add(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_bitand(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_bitnot(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_bitor(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_bitxor(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_call_eval(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_del_by_id(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_del_by_val(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_div(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_eq(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_generic(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_second(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_val(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_in(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_instanceof(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_is_boolean(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_is_function(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_is_number(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_is_object(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_is_string(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_is_undefined(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_less(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_lesseq(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_lshift(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_mod(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_mul(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_negate(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_neq(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_next_pname(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_not(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_nstricteq(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_pre_dec(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_pre_inc(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_base(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_global(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_skip(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_rshift(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_stricteq(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_sub(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_throw(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_to_jsnumber(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_typeof(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_op_urshift(STUB_ARGS);
+ static JSValueEncodedAsPointer* JIT_STUB cti_vm_throw(STUB_ARGS);
+ static VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS);
+ static VoidPtrPair JIT_STUB cti_op_post_dec(STUB_ARGS);
+ static VoidPtrPair JIT_STUB cti_op_post_inc(STUB_ARGS);
+ static VoidPtrPair JIT_STUB cti_op_resolve_func(STUB_ARGS);
+ static VoidPtrPair JIT_STUB cti_op_resolve_with_base(STUB_ARGS);
+ static int JIT_STUB cti_op_jless(STUB_ARGS);
+ static int JIT_STUB cti_op_jtrue(STUB_ARGS);
+ static int JIT_STUB cti_op_loop_if_less(STUB_ARGS);
+ static int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS);
+ static int JIT_STUB cti_op_loop_if_true(STUB_ARGS);
+ static int JIT_STUB cti_timeout_check(STUB_ARGS);
+ static void JIT_STUB cti_op_create_arguments(STUB_ARGS);
+ static void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS);
+ static void JIT_STUB cti_op_debug(STUB_ARGS);
+ static void JIT_STUB cti_op_end(STUB_ARGS);
+ static void JIT_STUB cti_op_jmp_scopes(STUB_ARGS);
+ static void JIT_STUB cti_op_pop_scope(STUB_ARGS);
+ static void JIT_STUB cti_op_profile_did_call(STUB_ARGS);
+ static void JIT_STUB cti_op_profile_will_call(STUB_ARGS);
+ static void JIT_STUB cti_op_put_by_id(STUB_ARGS);
+ static void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS);
+ static void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS);
+ static void JIT_STUB cti_op_put_by_id_second(STUB_ARGS);
+ static void JIT_STUB cti_op_put_by_index(STUB_ARGS);
+ static void JIT_STUB cti_op_put_by_val(STUB_ARGS);
+ static void JIT_STUB cti_op_put_by_val_array(STUB_ARGS);
+ static void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS);
+ static void JIT_STUB cti_op_put_getter(STUB_ARGS);
+ static void JIT_STUB cti_op_put_setter(STUB_ARGS);
+ static void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS);
+ static void JIT_STUB cti_op_tear_off_activation(STUB_ARGS);
+ static void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS);
+ static void JIT_STUB cti_register_file_check(STUB_ARGS);
+ static void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS);
+ static void* JIT_STUB cti_op_switch_char(STUB_ARGS);
+ static void* JIT_STUB cti_op_switch_imm(STUB_ARGS);
+ static void* JIT_STUB cti_op_switch_string(STUB_ARGS);
+ static void* JIT_STUB cti_vm_dontLazyLinkCall(STUB_ARGS);
+ static void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS);
+
+ static void tryCacheGetByID(CallFrame*, CodeBlock*, void* returnAddress, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot&);
+ static void tryCachePutByID(CallFrame*, CodeBlock*, void* returnAddress, JSValuePtr baseValue, const PutPropertySlot&);
+
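+        // Entry points of the trampolines shared by all generated code;
+        // presumably emitted by the JITStubs constructor into m_executablePool.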
+ void* ctiArrayLengthTrampoline() { return m_ctiArrayLengthTrampoline; }
+ void* ctiStringLengthTrampoline() { return m_ctiStringLengthTrampoline; }
+ void* ctiVirtualCallPreLink() { return m_ctiVirtualCallPreLink; }
+ void* ctiVirtualCallLink() { return m_ctiVirtualCallLink; }
+ void* ctiVirtualCall() { return m_ctiVirtualCall; }
+
+ private:
+ RefPtr<ExecutablePool> m_executablePool;
+
+ void* m_ctiArrayLengthTrampoline;
+ void* m_ctiStringLengthTrampoline;
+ void* m_ctiVirtualCallPreLink;
+ void* m_ctiVirtualCallLink;
+ void* m_ctiVirtualCall;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubs_h