Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                    |  54
-rw-r--r--  JavaScriptCore/jit/JIT.h                      |  41
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp          |   8
-rw-r--r--  JavaScriptCore/jit/JITArithmetic32_64.cpp     |  10
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp                | 475
-rw-r--r--  JavaScriptCore/jit/JITCall32_64.cpp           | 427
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h         |  62
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp             | 136
-rw-r--r--  JavaScriptCore/jit/JITOpcodes32_64.cpp        | 331
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp      |  17
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess32_64.cpp |  26
-rw-r--r--  JavaScriptCore/jit/JITStubCall.h              |  10
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp               | 119
-rw-r--r--  JavaScriptCore/jit/JITStubs.h                 |  25
-rw-r--r--  JavaScriptCore/jit/JSInterfaceJIT.h           |  23
-rw-r--r--  JavaScriptCore/jit/SpecializedThunkJIT.h      |   6
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.cpp        |  31
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.h          |  15
18 files changed, 1098 insertions(+), 718 deletions(-)
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index c7a4056..eeffd5c 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -24,6 +24,8 @@
*/
#include "config.h"
+
+#if ENABLE(JIT)
#include "JIT.h"
// This probably does not belong here; adding here for now as a quick Windows build fix.
@@ -32,8 +34,6 @@
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
-#if ENABLE(JIT)
-
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
@@ -78,10 +78,10 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
, m_labels(codeBlock ? codeBlock->instructions().size() : 0)
, m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
, m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
- , m_bytecodeIndex((unsigned)-1)
+ , m_bytecodeOffset((unsigned)-1)
#if USE(JSVALUE32_64)
, m_jumpTargetIndex(0)
- , m_mappedBytecodeIndex((unsigned)-1)
+ , m_mappedBytecodeOffset((unsigned)-1)
, m_mappedVirtualRegisterIndex((unsigned)-1)
, m_mappedTag((RegisterID)-1)
, m_mappedPayload((RegisterID)-1)
@@ -114,7 +114,7 @@ void JIT::emitTimeoutCheck()
#endif
#define NEXT_OPCODE(name) \
- m_bytecodeIndex += OPCODE_LENGTH(name); \
+ m_bytecodeOffset += OPCODE_LENGTH(name); \
break;
#if USE(JSVALUE32_64)
@@ -176,21 +176,21 @@ void JIT::privateCompileMainPass()
m_globalResolveInfoIndex = 0;
m_callLinkInfoIndex = 0;
- for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
- Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
- ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex);
+ for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
+ ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
#if ENABLE(OPCODE_SAMPLING)
- if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
+ if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
sampleInstruction(currentInstruction);
#endif
#if !USE(JSVALUE32_64)
- if (m_labels[m_bytecodeIndex].isUsed())
+ if (m_labels[m_bytecodeOffset].isUsed())
killLastResultRegister();
#endif
- m_labels[m_bytecodeIndex] = label();
+ m_labels[m_bytecodeOffset] = label();
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_BINARY_OP(op_del_by_val)
@@ -221,7 +221,6 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_call_varargs)
DEFINE_OP(op_catch)
DEFINE_OP(op_construct)
- DEFINE_OP(op_construct_verify)
DEFINE_OP(op_convert_this)
DEFINE_OP(op_init_arguments)
DEFINE_OP(op_create_arguments)
@@ -302,6 +301,8 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_resolve_skip)
DEFINE_OP(op_resolve_with_base)
DEFINE_OP(op_ret)
+ DEFINE_OP(op_call_put_result)
+ DEFINE_OP(op_ret_object_or_this)
DEFINE_OP(op_rshift)
DEFINE_OP(op_urshift)
DEFINE_OP(op_sret)
@@ -347,7 +348,7 @@ void JIT::privateCompileMainPass()
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeIndex = (unsigned)-1;
+ m_bytecodeOffset = (unsigned)-1;
#endif
}
@@ -356,7 +357,7 @@ void JIT::privateCompileLinkPass()
{
unsigned jmpTableCount = m_jmpTable.size();
for (unsigned i = 0; i < jmpTableCount; ++i)
- m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeIndex], this);
+ m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
m_jmpTable.clear();
}
@@ -373,11 +374,11 @@ void JIT::privateCompileSlowCases()
killLastResultRegister();
#endif
- m_bytecodeIndex = iter->to;
+ m_bytecodeOffset = iter->to;
#ifndef NDEBUG
- unsigned firstTo = m_bytecodeIndex;
+ unsigned firstTo = m_bytecodeOffset;
#endif
- Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_SLOWCASE_OP(op_add)
@@ -389,7 +390,6 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_call_eval)
DEFINE_SLOWCASE_OP(op_call_varargs)
DEFINE_SLOWCASE_OP(op_construct)
- DEFINE_SLOWCASE_OP(op_construct_verify)
DEFINE_SLOWCASE_OP(op_convert_this)
#if !USE(JSVALUE32)
DEFINE_SLOWCASE_OP(op_div)
@@ -450,7 +450,7 @@ void JIT::privateCompileSlowCases()
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeIndex = (unsigned)-1;
+ m_bytecodeOffset = (unsigned)-1;
#endif
}
@@ -483,10 +483,10 @@ JITCode JIT::privateCompile()
if (m_codeBlock->codeType() == FunctionCode) {
registerFileCheck.link(this);
- m_bytecodeIndex = 0;
+ m_bytecodeOffset = 0;
JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
- m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
jump(functionBody);
}
@@ -498,27 +498,27 @@ JITCode JIT::privateCompile()
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
SwitchRecord record = m_switches[i];
- unsigned bytecodeIndex = record.bytecodeIndex;
+ unsigned bytecodeOffset = record.bytecodeOffset;
if (record.type != SwitchRecord::String) {
ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
- record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
- record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
}
} else {
ASSERT(record.type == SwitchRecord::String);
- record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
+ record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
unsigned offset = it->second.branchOffset;
- it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
}
}
}
@@ -536,7 +536,7 @@ JITCode JIT::privateCompile()
if (m_codeBlock->hasExceptionInfo()) {
m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
- m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeIndex(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeIndex));
+ m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
}
// Link absolute addresses for jsr
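The JIT.cpp hunks above are mostly the mechanical rename of m_bytecodeIndex to m_bytecodeOffset; the functional change is that op_construct_verify leaves the opcode tables while op_call_put_result and op_ret_object_or_this join them. A minimal sketch of the offset-driven main-pass loop being renamed, using simplified stand-in types rather than the real JSC declarations:

    // A minimal sketch, assuming simplified stand-ins for Instruction and
    // OPCODE_LENGTH; the real privateCompileMainPass dispatches through the
    // DEFINE_OP macros instead of the placeholder comment below.
    #include <vector>

    struct Instruction { int opcode; };

    static unsigned opcodeLength(int) { return 4; } // assumption: fixed-length opcodes

    void compileMainPass(const std::vector<Instruction>& instructions)
    {
        for (unsigned bytecodeOffset = 0; bytecodeOffset < instructions.size(); ) {
            const Instruction& current = instructions[bytecodeOffset];
            // record m_labels[bytecodeOffset], emit code for current.opcode ...
            bytecodeOffset += opcodeLength(current.opcode); // cf. NEXT_OPCODE(name)
        }
    }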
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index b857351..928c80b 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -66,16 +66,16 @@ namespace JSC {
struct CallRecord {
MacroAssembler::Call from;
- unsigned bytecodeIndex;
+ unsigned bytecodeOffset;
void* to;
CallRecord()
{
}
- CallRecord(MacroAssembler::Call from, unsigned bytecodeIndex, void* to = 0)
+ CallRecord(MacroAssembler::Call from, unsigned bytecodeOffset, void* to = 0)
: from(from)
- , bytecodeIndex(bytecodeIndex)
+ , bytecodeOffset(bytecodeOffset)
, to(to)
{
}
@@ -83,11 +83,11 @@ namespace JSC {
struct JumpTable {
MacroAssembler::Jump from;
- unsigned toBytecodeIndex;
+ unsigned toBytecodeOffset;
JumpTable(MacroAssembler::Jump f, unsigned t)
: from(f)
- , toBytecodeIndex(t)
+ , toBytecodeOffset(t)
{
}
};
@@ -119,20 +119,20 @@ namespace JSC {
StringJumpTable* stringJumpTable;
} jumpTable;
- unsigned bytecodeIndex;
+ unsigned bytecodeOffset;
unsigned defaultOffset;
- SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset, Type type)
+ SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset, Type type)
: type(type)
- , bytecodeIndex(bytecodeIndex)
+ , bytecodeOffset(bytecodeOffset)
, defaultOffset(defaultOffset)
{
this->jumpTable.simpleJumpTable = jumpTable;
}
- SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset)
+ SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset)
: type(String)
- , bytecodeIndex(bytecodeIndex)
+ , bytecodeOffset(bytecodeOffset)
, defaultOffset(defaultOffset)
{
this->jumpTable.stringJumpTable = jumpTable;
@@ -223,6 +223,12 @@ namespace JSC {
jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
}
+ static CodePtr compileCTINativeCall(JSGlobalData* globalData, PassRefPtr<ExecutablePool> executablePool, NativeFunction func)
+ {
+ JIT jit(globalData);
+ return jit.privateCompileCTINativeCall(executablePool, globalData, func);
+ }
+
static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);
@@ -263,6 +269,7 @@ namespace JSC {
void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress);
void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, TrampolineStructure *trampolines);
+ CodePtr privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* data, NativeFunction func);
void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
void addSlowCase(Jump);
@@ -306,8 +313,8 @@ namespace JSC {
void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
void emitStoreDouble(unsigned index, FPRegisterID value);
- bool isLabeled(unsigned bytecodeIndex);
- void map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
+ bool isLabeled(unsigned bytecodeOffset);
+ void map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
void unmap(RegisterID);
void unmap();
bool isMapped(unsigned virtualRegisterIndex);
@@ -644,9 +651,9 @@ namespace JSC {
void emit_op_call(Instruction*);
void emit_op_call_eval(Instruction*);
void emit_op_call_varargs(Instruction*);
+ void emit_op_call_put_result(Instruction*);
void emit_op_catch(Instruction*);
void emit_op_construct(Instruction*);
- void emit_op_construct_verify(Instruction*);
void emit_op_convert_this(Instruction*);
void emit_op_create_arguments(Instruction*);
void emit_op_debug(Instruction*);
@@ -723,6 +730,7 @@ namespace JSC {
void emit_op_resolve_skip(Instruction*);
void emit_op_resolve_with_base(Instruction*);
void emit_op_ret(Instruction*);
+ void emit_op_ret_object_or_this(Instruction*);
void emit_op_rshift(Instruction*);
void emit_op_sret(Instruction*);
void emit_op_strcat(Instruction*);
@@ -751,7 +759,6 @@ namespace JSC {
void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -879,7 +886,7 @@ namespace JSC {
Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
Vector<JumpTable> m_jmpTable;
- unsigned m_bytecodeIndex;
+ unsigned m_bytecodeOffset;
Vector<JSRInfo> m_jsrSites;
Vector<SlowCaseEntry> m_slowCases;
Vector<SwitchRecord> m_switches;
@@ -890,7 +897,7 @@ namespace JSC {
#if USE(JSVALUE32_64)
unsigned m_jumpTargetIndex;
- unsigned m_mappedBytecodeIndex;
+ unsigned m_mappedBytecodeOffset;
unsigned m_mappedVirtualRegisterIndex;
RegisterID m_mappedTag;
RegisterID m_mappedPayload;
@@ -905,7 +912,7 @@ namespace JSC {
int m_uninterruptedConstantSequenceBegin;
#endif
#endif
- static PassRefPtr<NativeExecutable> stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
+ static CodePtr stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
} JIT_CLASS_ALIGNMENT;
inline void JIT::emit_op_loop(Instruction* currentInstruction)
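Alongside the renames, JIT.h grows a second static factory next to compileCTIMachineTrampolines: compileCTINativeCall wraps privateCompileCTINativeCall so callers can build a native-call thunk without constructing a JIT themselves. A usage sketch only; globalData, pool, and nativeFunc are assumed to be in scope, not shown in the patch:

    // Hedged usage sketch of the new static entry point.
    CodePtr nativeCallThunk = JIT::compileCTINativeCall(globalData, pool, nativeFunc);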
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index cd39b3a..0e5bb45 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -24,9 +24,10 @@
*/
#include "config.h"
-#include "JIT.h"
#if ENABLE(JIT)
+#if !USE(JSVALUE32_64)
+#include "JIT.h"
#include "CodeBlock.h"
#include "JITInlineMethods.h"
@@ -46,8 +47,6 @@ using namespace std;
namespace JSC {
-#if !USE(JSVALUE32_64)
-
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
@@ -1834,8 +1833,7 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
-#endif // !USE(JSVALUE32_64)
-
} // namespace JSC
+#endif // !USE(JSVALUE32_64)
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITArithmetic32_64.cpp b/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 97c8d32..023426e 100644
--- a/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -24,9 +24,10 @@
*/
#include "config.h"
-#include "JIT.h"
#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
#include "CodeBlock.h"
#include "JITInlineMethods.h"
@@ -46,8 +47,6 @@ using namespace std;
namespace JSC {
-#if USE(JSVALUE32_64)
-
void JIT::emit_op_negate(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -1404,8 +1403,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
/* ------------------------------ END: OP_MOD ------------------------------ */
-#endif // USE(JSVALUE32_64)
-
-}
+} // namespace JSC
+#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)
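Both arithmetic files are restructured the same way: the value-representation guard moves from wrapping the function bodies inside namespace JSC to wrapping the entire file below config.h, so each variant compiles to an empty translation unit in the other configuration. The resulting layout, sketched for the 32_64 variant (the JSVALUE64 files use !USE(JSVALUE32_64) instead):

    #include "config.h"

    #if ENABLE(JIT)
    #if USE(JSVALUE32_64)
    #include "JIT.h"

    namespace JSC {
    // ... value-representation-specific implementation ...
    } // namespace JSC

    #endif // USE(JSVALUE32_64)
    #endif // ENABLE(JIT)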
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
index 9979c8e..c0de9d1 100644
--- a/JavaScriptCore/jit/JITCall.cpp
+++ b/JavaScriptCore/jit/JITCall.cpp
@@ -24,9 +24,10 @@
*/
#include "config.h"
-#include "JIT.h"
#if ENABLE(JIT)
+#if !USE(JSVALUE32_64)
+#include "JIT.h"
#include "CodeBlock.h"
#include "JITInlineMethods.h"
@@ -45,406 +46,20 @@ using namespace std;
namespace JSC {
-#if USE(JSVALUE32_64)
-
-void JIT::compileOpCallInitializeCallFrame()
-{
- // regT0 holds callee, regT1 holds argCount
- store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // scopeChain
-
- emitStore(static_cast<unsigned>(RegisterFile::OptionalCalleeArguments), JSValue());
- storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
- storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
-}
-
-void JIT::compileOpCallSetupArgs(Instruction* instruction)
-{
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- emitPutJITStubArg(regT1, regT0, 0);
- emitPutJITStubArgConstant(registerOffset, 1);
- emitPutJITStubArgConstant(argCount, 2);
-}
-
-void JIT::compileOpConstructSetupArgs(Instruction* instruction)
-{
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
- int proto = instruction[5].u.operand;
- int thisRegister = instruction[6].u.operand;
-
- emitPutJITStubArg(regT1, regT0, 0);
- emitPutJITStubArgConstant(registerOffset, 1);
- emitPutJITStubArgConstant(argCount, 2);
- emitPutJITStubArgFromVirtualRegister(proto, 3, regT2, regT3);
- emitPutJITStubArgConstant(thisRegister, 4);
-}
-
-void JIT::compileOpCallVarargsSetupArgs(Instruction*)
-{
- emitPutJITStubArg(regT1, regT0, 0);
- emitPutJITStubArg(regT3, 1); // registerOffset
- emitPutJITStubArg(regT2, 2); // argCount
-}
-
-void JIT::compileOpCallVarargs(Instruction* instruction)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCountRegister = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- emitLoad(callee, regT1, regT0);
- emitLoadPayload(argCountRegister, regT2); // argCount
- addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
-
- compileOpCallVarargsSetupArgs(instruction);
-
- emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- mul32(Imm32(sizeof(Register)), regT3, regT3);
- addPtr(callFrameRegister, regT3);
- storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
- move(regT3, callFrameRegister);
-
- move(regT2, regT1); // argCount
-
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
-
- emitStore(dst, regT1, regT0);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, callee);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_call_NotJSFunction);
- stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
-
- map(m_bytecodeIndex + OPCODE_LENGTH(op_call_varargs), dst, regT1, regT0);
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::emit_op_ret(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- // We could JIT generate the deref, only calling out to C when the refcount hits zero.
- if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, cti_op_ret_scopeChain).call();
-
- emitLoad(dst, regT1, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-}
-
-void JIT::emit_op_construct_verify(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- emitLoad(dst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
-}
-
-void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitLoad(src, regT1, regT0);
- emitStore(dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
-}
-
-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
-}
-
-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallVarargsSlowCase(currentInstruction, iter);
-}
-
-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
-}
-
-void JIT::emit_op_call(Instruction* currentInstruction)
-{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_eval(Instruction* currentInstruction)
-{
- compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_load_varargs(Instruction* currentInstruction)
-{
- int argCountDst = currentInstruction[1].u.operand;
- int argsOffset = currentInstruction[2].u.operand;
-
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(Imm32(argsOffset));
- stubCall.call();
- // Stores a naked int32 in the register file.
- store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
-}
-
-void JIT::emit_op_call_varargs(Instruction* currentInstruction)
-{
- compileOpCallVarargs(currentInstruction);
-}
-
-void JIT::emit_op_construct(Instruction* currentInstruction)
-{
- compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
-}
-
-#if !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
- }
-
- emitLoad(callee, regT1, regT0);
-
- if (opcodeID == op_call)
- compileOpCallSetupArgs(instruction);
- else if (opcodeID == op_construct)
- compileOpConstructSetupArgs(instruction);
-
- emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
-
- // First, in the case of a construct, allocate the new object.
- if (opcodeID == op_construct) {
- JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitLoad(callee, regT1, regT0);
- }
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstruct() : m_globalData->jitStubs.ctiVirtualCall());
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- emitStore(dst, regT1, regT0);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, callee);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
-
- sampleCodeBlock(m_codeBlock);
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
- }
-
- emitLoad(callee, regT1, regT0);
-
- DataLabelPtr addressOfLinkedFunctionCheck;
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
-
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
-
- END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
-
- addSlowCase(jumpToSlow);
- ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
-
- // The following is the fast case, only used whan a callee can be linked.
-
- // In the case of OpConstruct, call out to a cti_ function to create the new object.
- if (opcodeID == op_construct) {
- int proto = instruction[5].u.operand;
- int thisRegister = instruction[6].u.operand;
-
- JITStubCall stubCall(this, cti_op_construct_JSConstruct);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
- stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
- stubCall.addArgument(proto);
- stubCall.call(thisRegister);
-
- emitLoad(callee, regT1, regT0);
- }
-
- // Fast version of stack frame initialization, directly relative to edi.
- // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
- emitStore(registerOffset + RegisterFile::OptionalCalleeArguments, JSValue());
- emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
- store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
- storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
- storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
-
- // Call to the callee
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + opcodeLengths[opcodeID], dst, regT1, regT0);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- // The arguments have been set up on the hot path for op_call_eval
- if (opcodeID == op_call)
- compileOpCallSetupArgs(instruction);
- else if (opcodeID == op_construct)
- compileOpConstructSetupArgs(instruction);
-
- // Fast check for JS function.
- Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
-
- // First, in the case of a construct, allocate the new object.
- if (opcodeID == op_construct) {
- JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitLoad(callee, regT1, regT0);
- }
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstructLink() : m_globalData->jitStubs.ctiVirtualCallLink());
-
- // Put the return value in dst.
- emitStore(dst, regT1, regT0);;
- sampleCodeBlock(m_codeBlock);
-
- // If not, we need an extra case in the if below!
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
-
- // Done! - return back to the hot path.
- if (opcodeID == op_construct)
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
- else
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
-
- // This handles host functions
- callLinkFailNotObject.link(this);
- callLinkFailNotJSFunction.link(this);
- JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
-
- emitStore(dst, regT1, regT0);;
- sampleCodeBlock(m_codeBlock);
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_CALL)
-
-#else // USE(JSVALUE32_64)
-
void JIT::compileOpCallInitializeCallFrame()
{
store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
- storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
}
void JIT::compileOpCallSetupArgs(Instruction* instruction)
{
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
// ecx holds func
emitPutJITStubArg(regT0, 0);
@@ -454,7 +69,7 @@ void JIT::compileOpCallSetupArgs(Instruction* instruction)
void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
{
- int registerOffset = instruction[4].u.operand;
+ int registerOffset = instruction[3].u.operand;
// ecx holds func
emitPutJITStubArg(regT0, 0);
@@ -465,10 +80,10 @@ void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
void JIT::compileOpConstructSetupArgs(Instruction* instruction)
{
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
- int proto = instruction[5].u.operand;
- int thisRegister = instruction[6].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+ int proto = instruction[4].u.operand;
+ int thisRegister = instruction[5].u.operand;
// ecx holds func
emitPutJITStubArg(regT0, 0);
@@ -478,11 +93,16 @@ void JIT::compileOpConstructSetupArgs(Instruction* instruction)
emitPutJITStubArgConstant(thisRegister, 4);
}
-void JIT::compileOpCallVarargs(Instruction* instruction)
+void JIT::emit_op_call_put_result(Instruction* instruction)
{
int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCountRegister = instruction[3].u.operand;
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int callee = instruction[1].u.operand;
+ int argCountRegister = instruction[2].u.operand;
emitGetVirtualRegister(argCountRegister, regT1);
emitGetVirtualRegister(callee, regT0);
@@ -501,20 +121,15 @@ void JIT::compileOpCallVarargs(Instruction* instruction)
addPtr(regT2, callFrameRegister);
emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitPutVirtualRegister(dst);
-
sampleCodeBlock(m_codeBlock);
}
-void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::compileOpCallVarargsSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = instruction[1].u.operand;
-
linkSlowCase(iter);
linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_call_NotJSFunction);
- stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+ stubCall.call();
sampleCodeBlock(m_codeBlock);
}
@@ -525,10 +140,9 @@ void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCase
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
// Handle eval
Jump wasEval;
@@ -568,20 +182,15 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
if (opcodeID == op_call_eval)
wasEval.link(this);
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitPutVirtualRegister(dst);
-
sampleCodeBlock(m_codeBlock);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
{
- int dst = instruction[1].u.operand;
-
linkSlowCase(iter);
linkSlowCase(iter);
JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+ stubCall.call();
sampleCodeBlock(m_codeBlock);
}
@@ -592,10 +201,9 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
// Handle eval
Jump wasEval;
@@ -627,8 +235,8 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// In the case of OpConstruct, call out to a cti_ function to create the new object.
if (opcodeID == op_construct) {
- int proto = instruction[5].u.operand;
- int thisRegister = instruction[6].u.operand;
+ int proto = instruction[4].u.operand;
+ int thisRegister = instruction[5].u.operand;
emitPutJITStubArg(regT0, 0);
emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
@@ -639,11 +247,12 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Fast version of stack frame initialization, directly relative to edi.
// Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
- storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
- storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+
store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
@@ -653,18 +262,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
if (opcodeID == op_call_eval)
wasEval.link(this);
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitPutVirtualRegister(dst);
-
sampleCodeBlock(m_codeBlock);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
linkSlowCase(iter);
@@ -693,10 +298,6 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstructLink() : m_globalData->jitStubs.ctiVirtualCallLink());
- // Put the return value in dst.
- emitPutVirtualRegister(dst);
- sampleCodeBlock(m_codeBlock);
-
// If not, we need an extra case in the if below!
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
@@ -711,7 +312,6 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
callLinkFailNotJSFunction.link(this);
JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
- emitPutVirtualRegister(dst);
sampleCodeBlock(m_codeBlock);
}
@@ -719,8 +319,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
#endif // !ENABLE(JIT_OPTIMIZE_CALL)
-#endif // USE(JSVALUE32_64)
-
} // namespace JSC
+#endif // !USE(JSVALUE32_64)
#endif // ENABLE(JIT)
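The operand shifts above (callee moving from instruction[2] to instruction[1], and dst disappearing from the call opcodes entirely) reflect a bytecode format change: the destination is no longer an operand of op_call/op_construct but of a separate op_call_put_result that follows the call. Sketched below, with the caveat that the opcode shapes are inferred from these JIT changes rather than quoted from the bytecompiler:

    // Inferred bytecode shape (not quoted from the bytecompiler):
    //
    //   before:  op_call            dst, callee, argCount, registerOffset
    //   after:   op_call            callee, argCount, registerOffset
    //            op_call_put_result dst
    //
    // On JSVALUE64 the result rides back in regT0, so the new opcode's
    // emitter (as added in this diff) is a single register-file store:
    void JIT::emit_op_call_put_result(Instruction* instruction)
    {
        int dst = instruction[1].u.operand;
        emitPutVirtualRegister(dst); // stores regT0 to dst
    }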
diff --git a/JavaScriptCore/jit/JITCall32_64.cpp b/JavaScriptCore/jit/JITCall32_64.cpp
new file mode 100644
index 0000000..9b5451a
--- /dev/null
+++ b/JavaScriptCore/jit/JITCall32_64.cpp
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::compileOpCallInitializeCallFrame()
+{
+ // regT0 holds callee, regT1 holds argCount
+ store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // scopeChain
+
+ storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
+ storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
+}
+
+void JIT::compileOpCallSetupArgs(Instruction* instruction)
+{
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArgConstant(registerOffset, 1);
+ emitPutJITStubArgConstant(argCount, 2);
+}
+
+void JIT::compileOpConstructSetupArgs(Instruction* instruction)
+{
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+ int proto = instruction[4].u.operand;
+ int thisRegister = instruction[5].u.operand;
+
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArgConstant(registerOffset, 1);
+ emitPutJITStubArgConstant(argCount, 2);
+ emitPutJITStubArgFromVirtualRegister(proto, 3, regT2, regT3);
+ emitPutJITStubArgConstant(thisRegister, 4);
+}
+
+void JIT::compileOpCallVarargsSetupArgs(Instruction*)
+{
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArg(regT3, 1); // registerOffset
+ emitPutJITStubArg(regT2, 2); // argCount
+}
+
+void JIT::emit_op_call_put_result(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int callee = instruction[1].u.operand;
+ int argCountRegister = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ emitLoad(callee, regT1, regT0);
+ emitLoadPayload(argCountRegister, regT2); // argCount
+ addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
+
+ compileOpCallVarargsSetupArgs(instruction);
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ mul32(Imm32(sizeof(Register)), regT3, regT3);
+ addPtr(callFrameRegister, regT3);
+ storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
+ move(regT3, callFrameRegister);
+
+ move(regT2, regT1); // argCount
+
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int callee = instruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_call_NotJSFunction);
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_ret_scopeChain).call();
+
+ emitLoad(dst, regT1, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned thisReg = currentInstruction[2].u.operand;
+
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_ret_scopeChain).call();
+
+ emitLoad(result, regT1, regT0);
+ Jump notJSCell = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+ notJSCell.link(this);
+ notObject.link(this);
+ emitLoad(thisReg, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+{
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+ // Stores a naked int32 in the register file.
+ store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
+ }
+
+ emitLoad(callee, regT1, regT0);
+
+ if (opcodeID == op_call)
+ compileOpCallSetupArgs(instruction);
+ else if (opcodeID == op_construct)
+ compileOpConstructSetupArgs(instruction);
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // First, in the case of a construct, allocate the new object.
+ if (opcodeID == op_construct) {
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitLoad(callee, regT1, regT0);
+ }
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstruct() : m_globalData->jitStubs.ctiVirtualCall());
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+ int callee = instruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
+ }
+
+ emitLoad(callee, regT1, regT0);
+
+ DataLabelPtr addressOfLinkedFunctionCheck;
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ addSlowCase(jumpToSlow);
+ ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ // The following is the fast case, only used when a callee can be linked.

+
+ // In the case of OpConstruct, call out to a cti_ function to create the new object.
+ if (opcodeID == op_construct) {
+ int proto = instruction[4].u.operand;
+ int thisRegister = instruction[5].u.operand;
+
+ JITStubCall stubCall(this, cti_op_construct_JSConstruct);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
+ stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
+ stubCall.addArgument(proto);
+ stubCall.call(thisRegister);
+
+ emitLoad(callee, regT1, regT0);
+ }
+
+ // Fast version of stack frame initialization, directly relative to edi.
+ // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT2);
+
+ store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
+ storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
+ storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
+
+ // Call to the callee
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ // The arguments have been set up on the hot path for op_call_eval
+ if (opcodeID == op_call)
+ compileOpCallSetupArgs(instruction);
+ else if (opcodeID == op_construct)
+ compileOpConstructSetupArgs(instruction);
+
+ // Fast check for JS function.
+ Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
+
+ // First, in the case of a construct, allocate the new object.
+ if (opcodeID == op_construct) {
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitLoad(callee, regT1, regT0);
+ }
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs.ctiVirtualConstructLink() : m_globalData->jitStubs.ctiVirtualCallLink());
+
+ // If not, we need an extra case in the if below!
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+
+ // Done! - return back to the hot path.
+ if (opcodeID == op_construct)
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
+ else
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
+
+ // This handles host functions
+ callLinkFailNotObject.link(this);
+ callLinkFailNotJSFunction.link(this);
+ JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
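The new emit_op_ret_object_or_this in this file implements the constructor-return rule directly in the epilogue: return the computed result if it is an object, otherwise fall back to this. A semantic sketch in plain C++ of what the emitted code computes, assuming JSValue's standard isObject predicate (this is not the emitted machine code):

    // The CellTag check and ObjectType check in the emitter collapse to
    // isObject() here.
    JSValue returnFromConstructor(JSValue result, JSValue thisValue)
    {
        return result.isObject() ? result : thisValue;
    }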
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 148f615..89faa00 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -113,10 +113,10 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHead
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Call nakedCall = nearCall();
- m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
+ m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
}
@@ -243,33 +243,33 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
const JumpList::JumpVector& jumpVector = jumpList.jumps();
size_t size = jumpVector.size();
for (size_t i = 0; i < size; ++i)
- m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
+ m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
+ jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
#if ENABLE(SAMPLING_FLAGS)
@@ -485,24 +485,24 @@ ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
emitStore(dst, jsUndefined());
}
-inline bool JIT::isLabeled(unsigned bytecodeIndex)
+inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
- if (jumpTarget == bytecodeIndex)
+ if (jumpTarget == bytecodeOffset)
return true;
- if (jumpTarget > bytecodeIndex)
+ if (jumpTarget > bytecodeOffset)
return false;
}
return false;
}
-inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
+inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
- if (isLabeled(bytecodeIndex))
+ if (isLabeled(bytecodeOffset))
return;
- m_mappedBytecodeIndex = bytecodeIndex;
+ m_mappedBytecodeOffset = bytecodeOffset;
m_mappedVirtualRegisterIndex = virtualRegisterIndex;
m_mappedTag = tag;
m_mappedPayload = payload;
@@ -518,7 +518,7 @@ inline void JIT::unmap(RegisterID registerID)
inline void JIT::unmap()
{
- m_mappedBytecodeIndex = (unsigned)-1;
+ m_mappedBytecodeOffset = (unsigned)-1;
m_mappedVirtualRegisterIndex = (unsigned)-1;
m_mappedTag = (RegisterID)-1;
m_mappedPayload = (RegisterID)-1;
@@ -526,7 +526,7 @@ inline void JIT::unmap()
inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -535,7 +535,7 @@ inline bool JIT::isMapped(unsigned virtualRegisterIndex)
inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -547,7 +547,7 @@ inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& pay
inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -559,14 +559,22 @@ inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
- addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
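+        // A constant's tag is never loaded into a register here, so rather than
+        // special-case constants we conservatively take the slow path unconditionally.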
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
+ }
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
- addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+ }
}
inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
@@ -632,7 +640,7 @@ ALWAYS_INLINE void JIT::killLastResultRegister()
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
@@ -644,8 +652,8 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
bool atJumpTarget = false;
- while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
- if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
+ while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
+ if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
atJumpTarget = true;
++m_jumpTargetsPosition;
}
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 9b0f780..f8be135 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -25,9 +25,8 @@
*/
#include "config.h"
-#include "JIT.h"
-
#if ENABLE(JIT)
+#include "JIT.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
@@ -42,7 +41,7 @@ namespace JSC {
#if !USE(JSVALUE32_64)
#define RECORD_JUMP_TARGET(targetOffset) \
- do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+ do { m_labels[m_bytecodeOffset + (targetOffset)].used(); } while (false)
void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
{
@@ -259,7 +258,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
move(callFrameRegister, X86Registers::edi);
- call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
+ call(Address(X86Registers::r9, OBJECT_OFFSETOF(NativeExecutable, m_function)));
addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
#elif CPU(X86)
@@ -337,7 +337,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// Plant callframe
move(callFrameRegister, X86Registers::edx);
- call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::ebx);
+ call(Address(X86Registers::ebx, OBJECT_OFFSETOF(NativeExecutable, m_function)));
// JSValue is a non-POD type
loadPtr(Address(X86Registers::eax), X86Registers::eax);
@@ -347,7 +348,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// Plant callframe
move(callFrameRegister, X86Registers::ecx);
- call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::ebx);
+ call(Address(X86Registers::ebx, OBJECT_OFFSETOF(NativeExecutable, m_function)));
#endif
// We've put a few temporaries on the stack in addition to the actual arguments
@@ -395,7 +397,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
subPtr(Imm32(sizeof(Register)), stackPointerRegister);
storePtr(regT0, Address(stackPointerRegister));
- call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT3);
+ call(Address(regT3, OBJECT_OFFSETOF(NativeExecutable, m_function)));
loadPtr(Address(regT0), regT0);
@@ -413,7 +416,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// Setup arg4: This is a plain hack
move(stackPointerRegister, ARMRegisters::r3);
- call(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT3);
+ call(Address(regT3, OBJECT_OFFSETOF(NativeExecutable, m_function)));
addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
#endif // OS(WINCE)
@@ -460,7 +464,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
addPtr(Imm32(20), stackPointerRegister, MIPSRegisters::a0);
// Call
- call(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ call(Address(regT2, OBJECT_OFFSETOF(NativeExecutable, m_function)));
// Get returned value from 0($v0) which is the same as 20($sp)
loadPtr(Address(returnValueRegister, 0), returnValueRegister);
@@ -535,7 +540,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
trampolines->ctiVirtualConstructLink = trampolineAt(finalCode, virtualConstructLinkBegin);
trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
trampolines->ctiVirtualConstruct = trampolineAt(finalCode, virtualConstructBegin);
- trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
+ trampolines->ctiNativeCall = trampolineAt(finalCode, nativeCallThunk);
#if ENABLE(JIT_OPTIMIZE_MOD)
trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
#endif
@@ -544,6 +549,11 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#endif
}
+JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool>, JSGlobalData* globalData, NativeFunction)
+{
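+    // All native calls share the one thunk built with the other trampolines on this
+    // value representation, so hand back that shared stub regardless of the function.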
+ return globalData->jitStubs.ctiNativeCall();
+}
+
void JIT::emit_op_mov(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -711,7 +721,7 @@ void JIT::emit_op_put_global_var(Instruction* currentInstruction)
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
- int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+ int skip = currentInstruction[3].u.operand;
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
while (skip--)
@@ -724,7 +734,7 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
- int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+ int skip = currentInstruction[2].u.operand;
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
@@ -739,12 +749,19 @@ void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_tear_off_activation);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand), regT2);
stubCall.call();
}
-void JIT::emit_op_tear_off_arguments(Instruction*)
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
{
- JITStubCall(this, cti_op_tear_off_arguments).call();
+ unsigned dst = currentInstruction[1].u.operand;
+
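+    // The arguments object is created lazily; a zero in the unmodified shadow slot
+    // means it was never materialized, so there is nothing to tear off.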
+ Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst))));
+ JITStubCall stubCall(this, cti_op_tear_off_arguments);
+ stubCall.addArgument(unmodifiedArgumentsRegister(dst), regT2);
+ stubCall.call();
+ argsNotCreated.link(this);
}
void JIT::emit_op_ret(Instruction* currentInstruction)
@@ -771,6 +788,48 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
ret();
}
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+{
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_ret_scopeChain).call();
+
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
+
+ // Return the result in %eax.
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
+ loadPtr(Address(returnValueRegister, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+ // Return 'this' in %eax.
+ notJSCell.link(this);
+ notObject.link(this);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+}
+
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_array);
@@ -786,16 +845,6 @@ void JIT::emit_op_resolve(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_construct_verify(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
-
-}
-
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -831,7 +880,7 @@ void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_skip);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1266,7 +1315,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
JITStubCall stubCall(this, cti_op_switch_imm);
@@ -1284,7 +1333,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
JITStubCall stubCall(this, cti_op_switch_char);
@@ -1302,7 +1351,7 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
JITStubCall stubCall(this, cti_op_switch_string);
stubCall.addArgument(scrutinee, regT2);
@@ -1316,7 +1365,7 @@ void JIT::emit_op_new_error(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_new_error);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
- stubCall.addArgument(Imm32(m_bytecodeIndex));
+ stubCall.addArgument(Imm32(m_bytecodeOffset));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1407,19 +1456,26 @@ void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_create_arguments(Instruction*)
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
- Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
if (m_codeBlock->m_numParameters == 1)
JITStubCall(this, cti_op_create_arguments_no_params).call();
else
JITStubCall(this, cti_op_create_arguments).call();
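+    // Store the result into both the named register and its unmodified shadow slot,
+    // so tear-off can still find the object even if script overwrites 'arguments'.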
+ emitPutVirtualRegister(dst);
+ emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
argsCreated.link(this);
}
-
-void JIT::emit_op_init_arguments(Instruction*)
+
+void JIT::emit_op_init_arguments(Instruction* currentInstruction)
{
- storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+ unsigned dst = currentInstruction[1].u.operand;
+
+ storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
+ storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst))));
}
void JIT::emit_op_convert_this(Instruction* currentInstruction)
@@ -1467,14 +1523,6 @@ void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowC
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
@@ -1672,7 +1720,7 @@ void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCa
void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
{
- int skip = currentInstruction[6].u.operand + m_codeBlock->needsFullScopeChain();
+ int skip = currentInstruction[6].u.operand;
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
while (skip--) {
@@ -1688,7 +1736,7 @@ void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Ve
unsigned dst = currentInstruction[1].u.operand;
void* globalObject = currentInstruction[2].u.jsCell;
Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
- int skip = currentInstruction[6].u.operand + m_codeBlock->needsFullScopeChain();
+ int skip = currentInstruction[6].u.operand;
while (skip--)
linkSlowCase(iter);
JITStubCall resolveStubCall(this, cti_op_resolve);
diff --git a/JavaScriptCore/jit/JITOpcodes32_64.cpp b/JavaScriptCore/jit/JITOpcodes32_64.cpp
index d4edb92..b814801 100644
--- a/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -25,9 +25,10 @@
*/
#include "config.h"
-#include "JIT.h"
-#if ENABLE(JIT) && USE(JSVALUE32_64)
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
@@ -309,14 +310,16 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// Plant callframe
move(callFrameRegister, X86Registers::edx);
- call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::ebx);
+ call(Address(X86Registers::ebx, OBJECT_OFFSETOF(NativeExecutable, m_function)));
// JSValue is a non-POD type, so eax points to it
emitLoad(0, regT1, regT0, X86Registers::eax);
#else
emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx); // callee
move(callFrameRegister, X86Registers::ecx); // callFrame
- call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::ebx);
+ call(Address(X86Registers::ebx, OBJECT_OFFSETOF(NativeExecutable, m_function)));
#endif
// We've put a few temporaries on the stack in addition to the actual arguments
@@ -373,7 +376,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// Setup arg0:
move(stackPointerRegister, regT0);
- call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT3);
+ call(Address(regT3, OBJECT_OFFSETOF(NativeExecutable, m_function)));
load32(Address(stackPointerRegister, 0), regT0);
load32(Address(stackPointerRegister, 4), regT1);
@@ -401,7 +405,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// Setup arg1:
move(callFrameRegister, regT1);
- call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT3);
+ call(Address(regT3, OBJECT_OFFSETOF(NativeExecutable, m_function)));
// Load return value
load32(Address(stackPointerRegister, 16), regT0);
@@ -476,7 +481,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
trampolines->ctiVirtualConstruct = trampolineAt(finalCode, virtualConstructBegin);
- trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
+ trampolines->ctiNativeCall = trampolineAt(finalCode, nativeCallThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#endif
@@ -489,6 +494,251 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#endif
}
+JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* globalData, NativeFunction func)
+{
+#if CPU(X86) || CPU(ARM_TRADITIONAL)
+ Label nativeCallThunk = align();
+ preserveReturnAddressAfterCall(regT0);
+ emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
+
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+#if CPU(X86)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+    /* We have two structs that we use to describe the stack frame we set up for our
+     * call to native code. NativeCallFrameStructure describes how we set up the stack
+     * in advance of the call. NativeFunctionCalleeSignature describes the callframe
+     * as the native code expects it. We do this because we are using the fastcall calling
+     * convention, which results in the callee popping its arguments off the stack, but
+     * not the rest of the callframe, so we need a reliable way to ensure we increment the
+     * stack pointer by the right amount after the call.
+     */
+
+#if COMPILER(MSVC) || OS(LINUX)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in EDX
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ JSValue result;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#else
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in ECX
+ // JSObject* callee; // passed in EDX
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#endif
+
+ const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
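+    // (Rounding sketch: adding 15 then masking the low four bits rounds the struct
+    // size up to the next multiple of 16, e.g. 36 -> 48, keeping the stack aligned.)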
+ // Allocate system stack frame
+ subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
+
+    // ArgList is passed by reference; store the address of the on-stack args struct in argPointer
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
+
+ // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
+ storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+#if COMPILER(MSVC) || OS(LINUX)
+    // The returned JSValue is non-POD, so pass the address of the result slot in ecx
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
+
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
+ storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+
+ // Plant callframe
+ move(callFrameRegister, X86Registers::edx);
+
+ Call nativeCall = call();
+
+ // JSValue is a non-POD type, so eax points to it
+ emitLoad(0, regT1, regT0, X86Registers::eax);
+#else
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx); // callee
+ move(callFrameRegister, X86Registers::ecx); // callFrame
+ Call nativeCall = call();
+#endif
+
+ // We've put a few temporaries on the stack in addition to the actual arguments
+ // so pull them off now
+ addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+
+#elif CPU(ARM_TRADITIONAL)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ // Allocate stack space for our arglist
+    COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0 && sizeof(JSValue) == 8 && sizeof(Register) == 8, ArgList_should_be_8byte_aligned);
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ move(callFrameRegister, regT1);
+ sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+
+ // push pointer to arguments
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // Argument passing method:
+ // r0 - points to return value
+ // r1 - callFrame
+ // r2 - callee
+ // stack: this(JSValue) and a pointer to ArgList
+
+#if OS(WINCE)
+ // Setup arg4:
+ push(stackPointerRegister);
+
+ // Setup arg3:
+ // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
+ load32(Address(regT1, -(int32_t)sizeof(void*) * 2), ARMRegisters::r3);
+ push(ARMRegisters::r3);
+ load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
+ storePtr(regT3, Address(stackPointerRegister));
+
+ // Setup arg2:
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);
+
+ // Setup arg1:
+ move(callFrameRegister, regT1);
+
+ // Setup arg0:
+ move(stackPointerRegister, regT0);
+
+ Call nativeCall = call();
+
+ load32(Address(stackPointerRegister, 0), regT0);
+ load32(Address(stackPointerRegister, 4), regT1);
+
+ addPtr(Imm32(sizeof(ArgList) + 8), stackPointerRegister);
+#else // OS(WINCE)
+ move(stackPointerRegister, regT3);
+ subPtr(Imm32(8), stackPointerRegister);
+ move(stackPointerRegister, regT0);
+ subPtr(Imm32(8 + 4 + 4 /* padding */), stackPointerRegister);
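+    // (Frame sketch: from the new sp upward sit 'this' (8 bytes), the arg4 slot
+    // (4 bytes) plus 4 bytes of padding, then the 8-byte return-value slot.)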
+
+ // Setup arg4:
+ storePtr(regT3, Address(stackPointerRegister, 8));
+
+ // Setup arg3:
+ // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
+ load32(Address(regT1, -(int32_t)sizeof(void*) * 2), regT3);
+ storePtr(regT3, Address(stackPointerRegister, 0));
+ load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
+ storePtr(regT3, Address(stackPointerRegister, 4));
+
+ // Setup arg2:
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);
+
+ // Setup arg1:
+ move(callFrameRegister, regT1);
+
+ Call nativeCall = call();
+
+ // Load return value
+ load32(Address(stackPointerRegister, 16), regT0);
+ load32(Address(stackPointerRegister, 20), regT1);
+
+ addPtr(Imm32(sizeof(ArgList) + 16 + 8), stackPointerRegister);
+#endif // OS(WINCE)
+
+#endif
+
+ // Check for an exception
+ move(ImmPtr(&globalData->exception), regT2);
+ Jump sawException = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::EmptyValueTag));
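+    // (tagFor(0, regT2) addresses the tag word of globalData->exception; any tag
+    // other than EmptyValueTag means the native call left an exception pending.)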
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT3);
+ ret();
+
+ // Handle an exception
+ sawException.link(this);
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif
+
+    // Thunk constructed! Copy the code and link up the call to the native function.
+ LinkBuffer patchBuffer(this, executablePool);
+
+#if ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+ patchBuffer.link(nativeCall, FunctionPtr(func));
+#endif
+
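+    // Each NativeFunction gets its own copy of this thunk, with 'func' linked
+    // directly into the call planted above.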
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ return trampolineAt(finalCode, nativeCallThunk);
+}
+
void JIT::emit_op_mov(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -499,7 +749,7 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
else {
emitLoad(src, regT1, regT0);
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
}
}
@@ -649,7 +899,7 @@ void JIT::emit_op_get_global_var(Instruction* currentInstruction)
emitLoad(index, regT1, regT0, regT2);
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
}
void JIT::emit_op_put_global_var(Instruction* currentInstruction)
@@ -663,14 +913,14 @@ void JIT::emit_op_put_global_var(Instruction* currentInstruction)
loadPtr(&globalObject->d()->registers, regT2);
emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
}
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int index = currentInstruction[2].u.operand;
- int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+ int skip = currentInstruction[3].u.operand;
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
while (skip--)
@@ -682,13 +932,13 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
emitLoad(index, regT1, regT0, regT2);
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
}
void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
int index = currentInstruction[1].u.operand;
- int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+ int skip = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
emitLoad(value, regT1, regT0);
@@ -702,19 +952,26 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
}
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_tear_off_activation);
stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand));
stubCall.call();
}
-void JIT::emit_op_tear_off_arguments(Instruction*)
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
{
- JITStubCall(this, cti_op_tear_off_arguments).call();
+ int dst = currentInstruction[1].u.operand;
+
+ Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), Imm32(JSValue::EmptyValueTag));
+ JITStubCall stubCall(this, cti_op_tear_off_arguments);
+ stubCall.addArgument(unmodifiedArgumentsRegister(dst));
+ stubCall.call();
+ argsNotCreated.link(this);
}
void JIT::emit_op_new_array(Instruction* currentInstruction)
@@ -745,7 +1002,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
if (dst != src)
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
}
void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -778,7 +1035,7 @@ void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_skip);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -804,7 +1061,7 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0);
+    map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
}
void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1390,7 +1647,7 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
if (src != dst)
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
}
void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1424,7 +1681,7 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
// Now store the exception returned by cti_op_throw.
emitStore(exception, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
}
void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
@@ -1443,7 +1700,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
JITStubCall stubCall(this, cti_op_switch_imm);
@@ -1461,7 +1718,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
JITStubCall stubCall(this, cti_op_switch_char);
@@ -1479,7 +1736,7 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
JITStubCall stubCall(this, cti_op_switch_string);
stubCall.addArgument(scrutinee);
@@ -1497,7 +1754,7 @@ void JIT::emit_op_new_error(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_new_error);
stubCall.addArgument(Imm32(type));
stubCall.addArgument(m_codeBlock->getConstant(message));
- stubCall.addArgument(Imm32(m_bytecodeIndex));
+ stubCall.addArgument(Imm32(m_bytecodeOffset));
stubCall.call(dst);
}
@@ -1532,22 +1789,29 @@ void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_create_arguments(Instruction*)
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
- Jump argsCreated = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::EmptyValueTag));
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump argsCreated = branch32(NotEqual, tagFor(dst), Imm32(JSValue::EmptyValueTag));
- // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation.
if (m_codeBlock->m_numParameters == 1)
JITStubCall(this, cti_op_create_arguments_no_params).call();
else
JITStubCall(this, cti_op_create_arguments).call();
+ emitStore(dst, regT1, regT0);
+ emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0);
+
argsCreated.link(this);
}
-void JIT::emit_op_init_arguments(Instruction*)
+void JIT::emit_op_init_arguments(Instruction* currentInstruction)
{
- emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
+ unsigned dst = currentInstruction[1].u.operand;
+
+ emitStore(dst, JSValue());
+ emitStore(unmodifiedArgumentsRegister(dst), JSValue());
}
void JIT::emit_op_convert_this(Instruction* currentInstruction)
@@ -1561,7 +1825,7 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
- map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
}
void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1598,6 +1862,7 @@ void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
noProfiler.link(this);
}
-}
+} // namespace JSC
-#endif // ENABLE(JIT) && USE(JSVALUE32_64)
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index 399afdd..4d36cfa 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -25,12 +25,10 @@
#include "config.h"
+#if ENABLE(JIT)
#if !USE(JSVALUE32_64)
-
#include "JIT.h"
-#if ENABLE(JIT)
-
#include "CodeBlock.h"
#include "GetterSetter.h"
#include "JITInlineMethods.h"
@@ -52,7 +50,7 @@ using namespace std;
namespace JSC {
-PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
JSInterfaceJIT jit;
JumpList failures;
@@ -80,7 +78,7 @@ PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* glob
jit.ret();
LinkBuffer patchBuffer(&jit, pool);
- return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+ return patchBuffer.finalizeCode().m_code;
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
@@ -125,7 +123,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
Jump nonCell = jump();
linkSlowCase(iter); // base array check
Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
- emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCodeForCall().addressForCall());
+ emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
Jump failed = branchTestPtr(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
@@ -380,7 +378,7 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
emitPutVirtualRegister(resultVReg);
// We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -393,7 +391,7 @@ void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowC
compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
// We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
@@ -1112,6 +1110,5 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
} // namespace JSC
-#endif // ENABLE(JIT)
-
#endif // !USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 792583b..6b8af7c 100644
--- a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -25,12 +25,10 @@
#include "config.h"
+#if ENABLE(JIT)
#if USE(JSVALUE32_64)
-
#include "JIT.h"
-#if ENABLE(JIT)
-
#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
@@ -241,10 +239,10 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
match.link(this);
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
// We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -258,7 +256,7 @@ void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowC
compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
// We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
@@ -269,7 +267,7 @@ void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator
#endif
-PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
JSInterfaceJIT jit;
JumpList failures;
@@ -298,7 +296,7 @@ PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* glob
jit.ret();
LinkBuffer patchBuffer(&jit, pool);
- return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+ return patchBuffer.finalizeCode().m_code;
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
@@ -321,7 +319,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -336,7 +334,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
Jump nonCell = jump();
linkSlowCase(iter); // base array check
Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
- emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCodeForCall().addressForCall());
+ emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
Jump failed = branchTestPtr(Zero, regT0);
emitStore(dst, regT1, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
@@ -414,7 +412,7 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(base, regT1);
compileGetByIdHotPath();
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}
void JIT::compileGetByIdHotPath()
@@ -1159,7 +1157,7 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
}
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1182,7 +1180,5 @@ void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowC
} // namespace JSC
+#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)
-
-#endif // ENABLE(JSVALUE32_64)
-
diff --git a/JavaScriptCore/jit/JITStubCall.h b/JavaScriptCore/jit/JITStubCall.h
index 70d2893..4478d06 100644
--- a/JavaScriptCore/jit/JITStubCall.h
+++ b/JavaScriptCore/jit/JITStubCall.h
@@ -167,17 +167,17 @@ namespace JSC {
JIT::Call call()
{
#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeIndex != (unsigned)-1)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, true);
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
#endif
m_jit->restoreArgumentReference();
JIT::Call call = m_jit->call();
- m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeIndex, m_stub.value()));
+ m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, m_stub.value()));
#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeIndex != (unsigned)-1)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, false);
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
#endif
#if USE(JSVALUE32_64)
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index 3d1c272..aea80a7 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -28,9 +28,9 @@
*/
#include "config.h"
-#include "JITStubs.h"
#if ENABLE(JIT)
+#include "JITStubs.h"
#include "Arguments.h"
#include "CallFrame.h"
@@ -113,7 +113,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_s
COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
-asm volatile (
+asm (
".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
@@ -135,7 +135,7 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
@@ -151,7 +151,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
@@ -176,7 +176,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_s
COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
@@ -203,7 +203,7 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
@@ -219,7 +219,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
@@ -332,7 +332,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-asm volatile (
+asm (
".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
@@ -354,7 +354,7 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
@@ -370,7 +370,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
@@ -394,7 +394,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-asm volatile (
+asm (
".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
@@ -429,7 +429,7 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
@@ -445,7 +445,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
@@ -1082,7 +1082,7 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
extern "C" { \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
}; \
- asm volatile ( \
+ asm ( \
".text" "\n" \
".align 2" "\n" \
".globl " SYMBOL_STRING(cti_##op) "\n" \
@@ -1162,7 +1162,7 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
extern "C" { \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
}; \
- asm volatile ( \
+ asm ( \
".globl " SYMBOL_STRING(cti_##op) "\n" \
SYMBOL_STRING(cti_##op) ":" "\n" \
"str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
@@ -1717,7 +1717,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
if (!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance()) {
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
@@ -2002,27 +2002,25 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createNotAFunctionError(stackFrame.callFrame, funcVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
-DEFINE_STUB_FUNCTION(void, op_create_arguments)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments)
{
STUB_INIT_STACK_FRAME(stackFrame);
Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame);
- stackFrame.callFrame->setCalleeArguments(arguments);
- stackFrame.callFrame[RegisterFile::ArgumentsRegister] = JSValue(arguments);
+ return JSValue::encode(JSValue(arguments));
}
-DEFINE_STUB_FUNCTION(void, op_create_arguments_no_params)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments_no_params)
{
STUB_INIT_STACK_FRAME(stackFrame);
Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame, Arguments::NoParameters);
- stackFrame.callFrame->setCalleeArguments(arguments);
- stackFrame.callFrame[RegisterFile::ArgumentsRegister] = JSValue(arguments);
+ return JSValue::encode(JSValue(arguments));
}
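
Both stubs above now hand the freshly allocated Arguments object back to the JIT-emitted caller instead of writing it into the call frame themselves, so the emitted code decides which slot(s) to store it to. A simplified model of that contract shift, with illustrative types only (not the real JSC glue):

struct Frame { void* slots[8]; };
static void* allocateArguments(Frame*) { return nullptr; } // stand-in for `new Arguments(...)`

// Before: the stub mutated the frame behind the compiler's back.
void createArgumentsOld(Frame* f, int argumentsRegister)
{
    f->slots[argumentsRegister] = allocateArguments(f);
}

// After: the stub is a pure producer; the caller emits the store itself.
void* createArgumentsNew(Frame* f)
{
    return allocateArguments(f);
}
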
DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
@@ -2030,7 +2028,10 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
STUB_INIT_STACK_FRAME(stackFrame);
ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- asActivation(stackFrame.args[0].jsValue())->copyRegisters(stackFrame.callFrame->optionalCalleeArguments());
+ JSActivation* activation = asActivation(stackFrame.args[0].jsValue());
+ activation->copyRegisters();
+ if (JSValue v = stackFrame.args[1].jsValue())
+ asArguments(v)->setActivation(activation);
}
DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
@@ -2038,8 +2039,7 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
STUB_INIT_STACK_FRAME(stackFrame);
ASSERT(stackFrame.callFrame->codeBlock()->usesArguments() && !stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- if (stackFrame.callFrame->optionalCalleeArguments())
- stackFrame.callFrame->optionalCalleeArguments()->copyRegisters();
+ asArguments(stackFrame.args[0].jsValue())->copyRegisters();
}
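
The tear-off stubs above implement the same escape rule: an Arguments (or activation) object initially aliases slots in the live call frame, and before the frame is popped it must copy those registers into its own heap storage — `copyRegisters()` — optionally re-pointing the Arguments object at the activation that now owns them. A simplified standalone model of tear-off (illustrative names, not JSC code):

#include <cstddef>
#include <vector>

struct ArgumentsView {
    const int* slots;          // initially points into the live frame
    std::vector<int> storage;  // filled on tear-off

    // Copy the frame slots to the heap so the view outlives the frame,
    // mirroring what copyRegisters() does for Arguments/JSActivation.
    void tearOff(const int* frameSlots, std::size_t count)
    {
        storage.assign(frameSlots, frameSlots + count);
        slots = storage.data(); // now self-contained; the frame may be popped
    }
};
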
DEFINE_STUB_FUNCTION(void, op_profile_will_call)
@@ -2097,7 +2097,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
} while (++iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
@@ -2110,7 +2110,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
if (constructor->isHostFunction()) {
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createNotAConstructorError(callFrame, constructor, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
@@ -2157,7 +2157,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
ASSERT(constructType == ConstructTypeNone);
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createNotAConstructorError(callFrame, constrVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
@@ -2175,6 +2175,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
if (LIKELY(baseValue.isCell() && subscript.isString())) {
Identifier propertyName(callFrame, asString(subscript)->value(callFrame));
PropertySlot slot(asCell(baseValue));
+ // JSString::value may have thrown; if it did, propertyName is the null identifier.
+ // No property should match a null identifier, so the lookup below misses and we
+ // wind up in the CHECK_FOR_EXCEPTION_AT_END, below.
Replaced-by-gr_replace
if (asCell(baseValue)->fastGetOwnPropertySlot(callFrame, propertyName, slot)) {
JSValue result = slot.getValue(callFrame, propertyName);
CHECK_FOR_EXCEPTION();
@@ -2426,7 +2428,7 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
} else if (!arguments.isUndefinedOrNull()) {
if (!arguments.isObject()) {
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
@@ -2466,7 +2468,7 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
}
} else {
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
@@ -2526,7 +2528,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
} while (++iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
@@ -2558,7 +2560,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
return JSValue::encode(result);
}
- unsigned vPCIndex = callFrame->codeBlock()->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = callFrame->codeBlock()->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, callFrame->codeBlock());
VM_THROW_EXCEPTION();
}
@@ -2860,7 +2862,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
} while (iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
VM_THROW_EXCEPTION_AT_END();
return JSValue::encode(JSValue());
@@ -2959,7 +2961,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
+ return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
@@ -3014,7 +3016,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw)
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
JSValue exceptionValue = stackFrame.args[0].jsValue();
ASSERT(exceptionValue);
@@ -3053,7 +3055,9 @@ DEFINE_STUB_FUNCTION(int, has_property)
JSObject* base = stackFrame.args[0].jsObject();
JSString* property = stackFrame.args[1].jsString();
- return base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
+ int result = base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
}
DEFINE_STUB_FUNCTION(JSObject*, op_push_scope)
@@ -3130,7 +3134,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
- return JSValue::encode(jsBoolean(JSValue::strictEqual(stackFrame.callFrame, src1, src2)));
+ bool result = JSValue::strictEqual(stackFrame.callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
@@ -3156,7 +3162,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
- return JSValue::encode(jsBoolean(!JSValue::strictEqual(stackFrame.callFrame, src1, src2)));
+ bool result = !JSValue::strictEqual(stackFrame.callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
}
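
has_property, op_stricteq, and op_nstricteq above (and the switch stubs below) all gain the same fix: capture the result of a call that can throw, then test for a pending exception before returning, rather than returning the raw value directly. A standalone sketch of the pattern, using a thread-local slot as a stand-in for globalData->exception (hypothetical names):

#include <optional>
#include <string>

thread_local std::optional<std::string> pendingException; // stand-in for globalData->exception

static bool mayThrowCompare(int a, int b)
{
    if (a < 0 || b < 0)
        pendingException = "bad operand";
    return a == b;
}

bool stricteqStub(int a, int b)
{
    // Mirror CHECK_FOR_EXCEPTION_AT_END: compute the result first, then
    // refuse to report success if the callee left an exception pending.
    bool result = mayThrowCompare(a, b);
    if (pendingException)
        return false; // the real stub redirects control to a throw handler
    return result;
}
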
DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
@@ -3181,7 +3189,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
if (!baseVal.isObject()) {
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
stackFrame.globalData->exception = createInvalidParamError(callFrame, "in", baseVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
@@ -3270,6 +3278,7 @@ DEFINE_STUB_FUNCTION(void*, op_switch_char)
result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->characters()[0]).executableAddress();
}
+ CHECK_FOR_EXCEPTION_AT_END();
return result;
}
@@ -3289,6 +3298,7 @@ DEFINE_STUB_FUNCTION(void*, op_switch_string)
result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
}
+ CHECK_FOR_EXCEPTION_AT_END();
return result;
}
@@ -3376,7 +3386,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
CodeBlock* codeBlock = callFrame->codeBlock();
JSGlobalData* globalData = stackFrame.globalData;
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, globalData->exceptionLocation);
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, globalData->exceptionLocation);
JSValue exceptionValue = globalData->exception;
ASSERT(exceptionValue);
@@ -3404,13 +3414,28 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
}
-NativeExecutable* JITThunks::specializedThunk(JSGlobalData* globalData, ThunkGenerator generator)
+MacroAssemblerCodePtr JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
+{
+ std::pair<CTIStubMap::iterator, bool> entry = m_ctiStubMap.add(generator, MacroAssemblerCodePtr());
+ if (entry.second)
+ entry.first->second = generator(globalData, m_executablePool.get());
+ return entry.first->second;
+}
+
+PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function)
+{
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
+ if (entry.second)
+ entry.first->second = NativeExecutable::create(JIT::compileCTINativeCall(globalData, m_executablePool, function), function);
+ return entry.first->second;
+}
+
+PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator)
{
- std::pair<ThunkMap::iterator, bool> entry = m_thunkMap.add(generator, 0);
- if (!entry.second)
- return entry.first->second.get();
- entry.first->second = generator(globalData, m_executablePool.get());
- return entry.first->second.get();
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
+ if (entry.second)
+ entry.first->second = NativeExecutable::create(generator(globalData, m_executablePool.get()), function);
+ return entry.first->second;
}
} // namespace JSC
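
Both ctiStub() and hostFunctionStub() above lean on the WTF::HashMap::add idiom: add() probes the table exactly once and returns a (iterator, isNewEntry) pair, so the expensive generator runs only on the first request for a given key. std::unordered_map::emplace has the same shape; a minimal standalone sketch (stand-in types, not the real MacroAssemblerCodePtr):

#include <string>
#include <unordered_map>

using Key = int;
using Code = std::string; // stand-in for MacroAssemblerCodePtr

static Code expensiveGenerate(Key k) { return "code-" + std::to_string(k); }

Code ctiStub(std::unordered_map<Key, Code>& cache, Key key)
{
    // emplace probes the map once; `inserted` plays the role of entry.second
    // in the HashMap::add idiom above.
    auto [it, inserted] = cache.emplace(key, Code());
    if (inserted)
        it->second = expensiveGenerate(key); // run the generator only on a miss
    return it->second;
}
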
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index 841950f..e5d1419 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -81,7 +81,7 @@ namespace JSC {
MacroAssemblerCodePtr ctiVirtualConstructLink;
MacroAssemblerCodePtr ctiVirtualCall;
MacroAssemblerCodePtr ctiVirtualConstruct;
- RefPtr<NativeExecutable> ctiNativeCallThunk;
+ MacroAssemblerCodePtr ctiNativeCall;
MacroAssemblerCodePtr ctiSoftModulo;
};
@@ -279,13 +279,18 @@ namespace JSC {
MacroAssemblerCodePtr ctiVirtualConstructLink() { return m_trampolineStructure.ctiVirtualConstructLink; }
MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
MacroAssemblerCodePtr ctiVirtualConstruct() { return m_trampolineStructure.ctiVirtualConstruct; }
- NativeExecutable* ctiNativeCallThunk() { return m_trampolineStructure.ctiNativeCallThunk.get(); }
+ MacroAssemblerCodePtr ctiNativeCall() { return m_trampolineStructure.ctiNativeCall; }
MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
- NativeExecutable* specializedThunk(JSGlobalData* globalData, ThunkGenerator generator);
+ MacroAssemblerCodePtr ctiStub(JSGlobalData* globalData, ThunkGenerator generator);
+
+ PassRefPtr<NativeExecutable> hostFunctionStub(JSGlobalData* globalData, NativeFunction func);
+ PassRefPtr<NativeExecutable> hostFunctionStub(JSGlobalData* globalData, NativeFunction func, ThunkGenerator generator);
private:
- typedef HashMap<ThunkGenerator, RefPtr<NativeExecutable> > ThunkMap;
- ThunkMap m_thunkMap;
+ typedef HashMap<ThunkGenerator, MacroAssemblerCodePtr> CTIStubMap;
+ CTIStubMap m_ctiStubMap;
+ typedef HashMap<NativeFunction, RefPtr<NativeExecutable> > HostFunctionStubMap;
+ HostFunctionStubMap m_hostFunctionStubMap;
RefPtr<ExecutablePool> m_executablePool;
TrampolineStructure m_trampolineStructure;
@@ -301,15 +306,17 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION);
@@ -353,8 +360,8 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
@@ -378,8 +385,6 @@ extern "C" {
int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
diff --git a/JavaScriptCore/jit/JSInterfaceJIT.h b/JavaScriptCore/jit/JSInterfaceJIT.h
index 12a6cfa..c85b94d 100644
--- a/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -191,27 +191,32 @@ namespace JSC {
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotJSCell(unsigned virtualRegisterIndex)
{
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag));
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
{
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
loadPtr(payloadFor(virtualRegisterIndex), dst);
return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::Int32Tag));
}
- inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(unsigned index, RegisterID base)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(unsigned virtualRegisterIndex, RegisterID base)
{
- return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return Address(base, (virtualRegisterIndex * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
}
- inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned index, RegisterID base)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned virtualRegisterIndex, RegisterID base)
{
- return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return Address(base, (virtualRegisterIndex * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
{
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
loadPtr(tagFor(virtualRegisterIndex), scratch);
Jump isDouble = branch32(Below, scratch, Imm32(JSValue::LowestTag));
Jump notInt = branch32(NotEqual, scratch, Imm32(JSValue::Int32Tag));
@@ -297,15 +302,17 @@ namespace JSC {
#endif
#if !USE(JSVALUE32_64)
- inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned index, RegisterID base)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned virtualRegisterIndex, RegisterID base)
{
- return addressFor(index, base);
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return addressFor(virtualRegisterIndex, base);
}
#endif
- inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(unsigned index, RegisterID base)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(unsigned virtualRegisterIndex, RegisterID base)
{
- return Address(base, (index * sizeof(Register)));
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return Address(base, (virtualRegisterIndex * sizeof(Register)));
}
}
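
The asserts added above guard that the index names a real frame slot: constant registers live at indices at or above FirstConstantRegisterIndex and have no storage in the register file, so computing an address for one would be nonsense. The address arithmetic itself is just index * sizeof(Register) plus the offset of the tag or payload half of the boxed value. A standalone model of that computation, assuming a JSVALUE32_64-style tag/payload slot (the constant below is illustrative, not the real value):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Slot { uint32_t payload; uint32_t tag; }; // simplified Register

constexpr int kFirstConstantRegisterIndex = 0x40000000; // illustrative only

// Byte offset of the tag of virtual register i, mirroring tagFor().
std::size_t tagOffsetFor(unsigned i)
{
    assert(static_cast<int>(i) < kFirstConstantRegisterIndex);
    return i * sizeof(Slot) + offsetof(Slot, tag);
}

// Byte offset of the payload of virtual register i, mirroring payloadFor().
std::size_t payloadOffsetFor(unsigned i)
{
    assert(static_cast<int>(i) < kFirstConstantRegisterIndex);
    return i * sizeof(Slot) + offsetof(Slot, payload);
}
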
diff --git a/JavaScriptCore/jit/SpecializedThunkJIT.h b/JavaScriptCore/jit/SpecializedThunkJIT.h
index 69925a9..00f7aef 100644
--- a/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -127,11 +127,11 @@ namespace JSC {
ret();
}
- PassRefPtr<NativeExecutable> finalize()
+ MacroAssemblerCodePtr finalize(MacroAssemblerCodePtr fallback)
{
LinkBuffer patchBuffer(this, m_pool.get());
- patchBuffer.link(m_failures, CodeLocationLabel(m_globalData->jitStubs.ctiNativeCallThunk()->generatedJITCodeForCall().addressForCall()));
- return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+ patchBuffer.link(m_failures, CodeLocationLabel(fallback));
+ return patchBuffer.finalizeCode().m_code;
}
private:
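
finalize() now takes its slow-path target as a parameter instead of reaching into globalData for a global thunk, so each specialized thunk's failure jumps are linked to whatever fallback the caller chooses (the generators below pass ctiNativeCall()). A simplified model of that dependency-injection shape (illustrative names, not JSC code):

#include <optional>

struct FinalizedThunk {
    std::optional<int> (*fast)(int); // the specialized path; nullopt means "failed"
    int (*fallback)(int);            // the linked failure target

    int operator()(int arg) const
    {
        if (auto hit = fast(arg))
            return *hit;          // fast path handled it
        return fallback(arg);     // failure edge, as linked by finalize(fallback)
    }
};

static std::optional<int> squareIfSmall(int x)
{
    return x < 1000 ? std::optional<int>(x * x) : std::nullopt;
}

static int genericSquare(int x) { return x * x; } // stand-in for ctiNativeCall

FinalizedThunk finalize(int (*fallback)(int))
{
    return FinalizedThunk { squareIfSmall, fallback };
}

// e.g. finalize(genericSquare)(5) == 25, via the fast path;
//      finalize(genericSquare)(5000) == 25000000, via the fallback.
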
diff --git a/JavaScriptCore/jit/ThunkGenerators.cpp b/JavaScriptCore/jit/ThunkGenerators.cpp
index c625c3d..271c7c1 100644
--- a/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -25,8 +25,9 @@
#include "config.h"
#include "ThunkGenerators.h"
-#include <wtf/text/StringImpl.h>
+#include "CodeBlock.h"
+#include <wtf/text/StringImpl.h>
#include "SpecializedThunkJIT.h"
#if ENABLE(JIT)
@@ -62,59 +63,59 @@ static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, Mac
jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}
-PassRefPtr<NativeExecutable> charCodeAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
SpecializedThunkJIT jit(1, globalData, pool);
stringCharLoad(jit);
jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize();
+ return jit.finalize(globalData->jitStubs.ctiNativeCall());
}
-PassRefPtr<NativeExecutable> charAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
SpecializedThunkJIT jit(1, globalData, pool);
stringCharLoad(jit);
charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize();
+ return jit.finalize(globalData->jitStubs.ctiNativeCall());
}
-PassRefPtr<NativeExecutable> fromCharCodeThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
SpecializedThunkJIT jit(1, globalData, pool);
// load char code
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize();
+ return jit.finalize(globalData->jitStubs.ctiNativeCall());
}
-PassRefPtr<NativeExecutable> sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
#if USE(JSVALUE64) || USE(JSVALUE32_64)
SpecializedThunkJIT jit(1, globalData, pool);
if (!jit.supportsFloatingPointSqrt())
- return globalData->jitStubs.ctiNativeCallThunk();
+ return globalData->jitStubs.ctiNativeCall();
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize();
+ return jit.finalize(globalData->jitStubs.ctiNativeCall());
#else
UNUSED_PARAM(pool);
- return globalData->jitStubs.ctiNativeCallThunk();
+ return globalData->jitStubs.ctiNativeCall();
#endif
}
static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
-PassRefPtr<NativeExecutable> powThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodePtr powThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
#if USE(JSVALUE64) || USE(JSVALUE32_64)
SpecializedThunkJIT jit(2, globalData, pool);
if (!jit.supportsFloatingPoint())
- return globalData->jitStubs.ctiNativeCallThunk();
+ return globalData->jitStubs.ctiNativeCall();
jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
@@ -147,10 +148,10 @@ PassRefPtr<NativeExecutable> powThunkGenerator(JSGlobalData* globalData, Executa
} else
jit.appendFailure(nonIntExponent);
- return jit.finalize();
+ return jit.finalize(globalData->jitStubs.ctiNativeCall());
#else
UNUSED_PARAM(pool);
- return globalData->jitStubs.ctiNativeCallThunk();
+ return globalData->jitStubs.ctiNativeCall();
#endif
}
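
Each generator above now follows the same guard-then-generate shape: if the specialized path cannot be emitted on this hardware (no FP or sqrt support), return the shared native-call trampoline rather than failing; otherwise emit the fast path and link its failure edges to that same trampoline. A standalone sketch of the shape, with a plain function pointer standing in for MacroAssemblerCodePtr:

#include <cmath>

using CodePtr = double (*)(double); // stand-in for MacroAssemblerCodePtr

static double nativeCallFallback(double x) { return std::sqrt(x); } // stand-in for ctiNativeCall

static bool supportsFloatingPointSqrt() { return true; } // would query CPU features in real code

CodePtr sqrtThunkGenerator()
{
    // If the specialized path can't be emitted, hand back the generic
    // trampoline instead of failing, mirroring the generators above.
    if (!supportsFloatingPointSqrt())
        return nativeCallFallback;
    return +[](double x) { return std::sqrt(x); }; // the "specialized" code
}
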
diff --git a/JavaScriptCore/jit/ThunkGenerators.h b/JavaScriptCore/jit/ThunkGenerators.h
index c3374f2..15261f7 100644
--- a/JavaScriptCore/jit/ThunkGenerators.h
+++ b/JavaScriptCore/jit/ThunkGenerators.h
@@ -27,19 +27,18 @@
#define ThunkGenerators_h
#if ENABLE(JIT)
-#include <wtf/PassRefPtr.h>
-
namespace JSC {
class ExecutablePool;
class JSGlobalData;
class NativeExecutable;
+ class MacroAssemblerCodePtr;
- typedef PassRefPtr<NativeExecutable> (*ThunkGenerator)(JSGlobalData*, ExecutablePool*);
- PassRefPtr<NativeExecutable> charCodeAtThunkGenerator(JSGlobalData*, ExecutablePool*);
- PassRefPtr<NativeExecutable> charAtThunkGenerator(JSGlobalData*, ExecutablePool*);
- PassRefPtr<NativeExecutable> fromCharCodeThunkGenerator(JSGlobalData*, ExecutablePool*);
- PassRefPtr<NativeExecutable> sqrtThunkGenerator(JSGlobalData*, ExecutablePool*);
- PassRefPtr<NativeExecutable> powThunkGenerator(JSGlobalData*, ExecutablePool*);
+ typedef MacroAssemblerCodePtr (*ThunkGenerator)(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr powThunkGenerator(JSGlobalData*, ExecutablePool*);
}
#endif